git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
Use make_shared while creating shared_ptr 7769/head
authorSomnath Roy <somnath.roy@sandisk.com>
Mon, 1 Feb 2016 19:35:26 +0000 (14:35 -0500)
committerSomnath Roy <somnath.roy@sandisk.com>
Thu, 18 Feb 2016 01:16:39 +0000 (20:16 -0500)
make_shared() will get rid of one extra heap allocation during shared_ptr
creation. It is also 20-30% faster than creating a shared_ptr directly
from a 'new' call.

Signed-off-by: Somnath Roy <somnath.roy@sandisk.com>
18 files changed:
src/include/encoding.h
src/kv/KeyValueDB.h
src/kv/KineticStore.h
src/kv/LevelDBStore.h
src/kv/RocksDBStore.cc
src/kv/RocksDBStore.h
src/os/ObjectStore.cc
src/os/ObjectStore.h
src/os/filestore/CollectionIndex.h
src/os/filestore/DBObjectMap.cc
src/os/filestore/DBObjectMap.h
src/os/filestore/LFNIndex.cc
src/osd/OSD.cc
src/osd/OSDMap.h
src/osd/PG.cc
src/osd/ReplicatedBackend.cc
src/osd/ReplicatedPG.cc
src/osd/ReplicatedPG.h

index 3ba53274bd2aabe9d7a3f8b30fdec41392edc995..a5a93a0996b7cbe8dca845999ca4266ceef9e8ed 100644 (file)
@@ -429,7 +429,7 @@ inline void decode(std::list<ceph::shared_ptr<T> >& ls, bufferlist::iterator& p)
   decode(n, p);
   ls.clear();
   while (n--) {
-    ceph::shared_ptr<T> v(new T);
+    ceph::shared_ptr<T> v(std::make_shared<T>());
     decode(*v, p);
     ls.push_back(v);
   }
@@ -595,7 +595,7 @@ inline void decode(std::vector<ceph::shared_ptr<T> >& v, bufferlist::iterator& p
   decode(n, p);
   v.resize(n);
   for (__u32 i=0; i<n; i++) {
-    v[i].reset(new T());
+    v[i] = std::make_shared<T>();
     decode(*v[i], p);
   }
 }
index f20b79909702f41ab654c52c045695adf7a57906..c7a83a253230330e0c9e8d36da5f6ab27eb55497 100644 (file)
@@ -247,9 +247,7 @@ public:
   }
 
   Iterator get_iterator(const std::string &prefix) {
-    return ceph::shared_ptr<IteratorImpl>(
-      new IteratorImpl(prefix, get_iterator())
-    );
+    return std::make_shared<IteratorImpl>(prefix, get_iterator());
   }
 
   WholeSpaceIterator get_snapshot_iterator() {
@@ -257,9 +255,7 @@ public:
   }
 
   Iterator get_snapshot_iterator(const std::string &prefix) {
-    return ceph::shared_ptr<IteratorImpl>(
-      new IteratorImpl(prefix, get_snapshot_iterator())
-    );
+    return std::make_shared<IteratorImpl>(prefix, get_snapshot_iterator());
   }
 
   virtual uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) = 0;
index c2be802dfab1d614c5bf4fc1ae2c928880b59905..496cb4c0b9e204565e7bf2a4c2931e8a6560d802 100644 (file)
@@ -95,8 +95,7 @@ public:
   };
 
   KeyValueDB::Transaction get_transaction() {
-    return ceph::shared_ptr< KineticTransactionImpl >(
-      new KineticTransactionImpl(this));
+    return std::make_shared<KineticTransactionImpl>(this);
   }
 
   int submit_transaction(KeyValueDB::Transaction t);
@@ -148,8 +147,7 @@ public:
 
 protected:
   WholeSpaceIterator _get_iterator() {
-    return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
-                                                               new KineticWholeSpaceIteratorImpl(kinetic_conn.get()));
+    return std::make_shared<KineticWholeSpaceIteratorImpl>(kinetic_conn.get());
   }
 
   // TODO: remove snapshots from interface
index 8260f8b351ec89ee355485b2d1aa9206b91bf3e1..766ed1960e9ee2d77748b14044584ddbd610f129 100644 (file)
@@ -199,8 +199,7 @@ public:
   };
 
   KeyValueDB::Transaction get_transaction() {
-    return ceph::shared_ptr< LevelDBTransactionImpl >(
-      new LevelDBTransactionImpl(this));
+    return std::make_shared<LevelDBTransactionImpl>(this);
   }
 
   int submit_transaction(KeyValueDB::Transaction t);
@@ -402,11 +401,8 @@ err:
 
 protected:
   WholeSpaceIterator _get_iterator() {
-    return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
-      new LevelDBWholeSpaceIteratorImpl(
-       db->NewIterator(leveldb::ReadOptions())
-      )
-    );
+    return std::make_shared<LevelDBWholeSpaceIteratorImpl>(
+       db->NewIterator(leveldb::ReadOptions()));
   }
 
   WholeSpaceIterator _get_snapshot_iterator() {
@@ -416,10 +412,9 @@ protected:
     snapshot = db->GetSnapshot();
     options.snapshot = snapshot;
 
-    return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
-      new LevelDBSnapshotIteratorImpl(db.get(), snapshot,
-       db->NewIterator(options))
-    );
+    return std::make_shared<LevelDBSnapshotIteratorImpl>(
+        db.get(), snapshot,
+       db->NewIterator(options));
   }
 
 };
index 923537cb92752674739b45073edd94aec65a625d..d8751cc4435b023a0bcbe8bc2f7299545008c728 100644 (file)
@@ -657,11 +657,8 @@ string RocksDBStore::past_prefix(const string &prefix)
 
 RocksDBStore::WholeSpaceIterator RocksDBStore::_get_iterator()
 {
-  return std::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
-    new RocksDBWholeSpaceIteratorImpl(
-      db->NewIterator(rocksdb::ReadOptions())
-    )
-  );
+  return std::make_shared<RocksDBWholeSpaceIteratorImpl>(
+        db->NewIterator(rocksdb::ReadOptions()));
 }
 
 RocksDBStore::WholeSpaceIterator RocksDBStore::_get_snapshot_iterator()
@@ -672,10 +669,8 @@ RocksDBStore::WholeSpaceIterator RocksDBStore::_get_snapshot_iterator()
   snapshot = db->GetSnapshot();
   options.snapshot = snapshot;
 
-  return std::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
-    new RocksDBSnapshotIteratorImpl(db, snapshot,
-      db->NewIterator(options))
-  );
+  return std::make_shared<RocksDBSnapshotIteratorImpl>(
+          db, snapshot, db->NewIterator(options));
 }
 
 RocksDBStore::RocksDBSnapshotIteratorImpl::~RocksDBSnapshotIteratorImpl()
index 63cbc5ad6c2ba94cac8f368ca8dd430f2078299e..41508ba6cd104a2c5c54b1bad1667182f323f2c3 100644 (file)
@@ -157,8 +157,7 @@ public:
   };
 
   KeyValueDB::Transaction get_transaction() {
-    return std::shared_ptr< RocksDBTransactionImpl >(
-      new RocksDBTransactionImpl(this));
+    return std::make_shared<RocksDBTransactionImpl>(this);
   }
 
   int submit_transaction(KeyValueDB::Transaction t);
index ba638bfad96c25e322b7ba006c6adbe68f60a4a3..f319e765688280a4c303dd0400146a6bdcef090f 100644 (file)
@@ -177,7 +177,7 @@ int ObjectStore::queue_transactions(
   Context *oncomplete,
   TrackedOpRef op = TrackedOpRef())
 {
-  RunOnDeleteRef _complete(new RunOnDelete(oncomplete));
+  RunOnDeleteRef _complete (std::make_shared<RunOnDelete>(oncomplete));
   Context *_onreadable = new Wrapper<RunOnDeleteRef>(
     onreadable, _complete);
   Context *_oncommit = new Wrapper<RunOnDeleteRef>(
index b0b952452ae8bcbcd4fe5d4417708b58e0177eca..885922f19d94ec89a9e99eadab1b2340182eb366 100644 (file)
@@ -579,7 +579,7 @@ public:
     }
     void register_on_complete(Context *c) {
       if (!c) return;
-      RunOnDeleteRef _complete(new RunOnDelete(c));
+      RunOnDeleteRef _complete (std::make_shared<RunOnDelete>(c));
       register_on_applied(new ContainerContext<RunOnDeleteRef>(_complete));
       register_on_commit(new ContainerContext<RunOnDeleteRef>(_complete));
     }
index 0d1fc30fcb36c4a8c84cdb5d5b9f151c8b86908a..942dc233e6562f529feb5ec5d4991c16d5fba414 100644 (file)
@@ -77,8 +77,8 @@ protected:
   /// Type of returned paths
   typedef ceph::shared_ptr<Path> IndexedPath;
 
-  static IndexedPath get_testing_path(string path, const coll_t& collection) {
-    return IndexedPath(new Path(path, collection));
+  static IndexedPath get_testing_path(string path, coll_t collection) {
+    return std::make_shared<Path>(path, collection);
   }
 
   static const uint32_t FLAT_INDEX_TAG = 0;
index 04340439c7a94945112855076459b718f61ea550..67e17bdb8c89d1479867155b9934c89d2e8bb947 100644 (file)
@@ -237,7 +237,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::init()
       assert(0);
       return -EINVAL;
     }
-    parent_iter.reset(new DBObjectMapIteratorImpl(map, parent));
+    parent_iter = std::make_shared<DBObjectMapIteratorImpl>(map, parent);
   }
   key_iter = map->db->get_iterator(map->user_prefix(header));
   assert(key_iter);
index 400c54a1f3866a2cf9cd2c7f6fbfba16bb9b11ae..de68f3c3b69972b994588dc2e54c0787a0a67d70 100644 (file)
@@ -413,7 +413,7 @@ private:
 
   typedef ceph::shared_ptr<DBObjectMapIteratorImpl> DBObjectMapIterator;
   DBObjectMapIterator _get_iterator(Header header) {
-    return DBObjectMapIterator(new DBObjectMapIteratorImpl(this, header));
+    return std::make_shared<DBObjectMapIteratorImpl>(this, header);
   }
 
   /// sys
index 4cc09c32cc12e816c3b754dfdd2b49bd8b1ea5e2..4962d6a269b9456938ab6908765f1d71da0376e8 100644 (file)
@@ -124,7 +124,7 @@ int LFNIndex::lookup(const ghobject_t &oid,
   if (r < 0)
     goto out;
   string full_path = get_full_path(path, short_name);
-  *out_path = IndexedPath(new Path(full_path, this));
+  *out_path = std::make_shared<Path>(full_path, this);
   r = 0;
   );
 }
index 7345d535301b84d1ec3744827043d4c15378048b..9f7354b5efd5a6410a37b1af48eecc50167b4ed8 100644 (file)
@@ -2725,8 +2725,8 @@ void OSD::recursive_remove_collection(ObjectStore *store, spg_t pgid, coll_t tmp
     coll_t(),
     make_snapmapper_oid());
 
-  ceph::shared_ptr<ObjectStore::Sequencer> osr(
-    new ObjectStore::Sequencer("rm"));
+  ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
+                                      ObjectStore::Sequencer>("rm"));
   ObjectStore::Transaction t;
   SnapMapper mapper(&driver, 0, 0, 0, pgid.shard);
 
@@ -5318,8 +5318,8 @@ void OSD::do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, buffe
     cmd_getval(cct, cmdmap, "object_size", osize, (int64_t)0);
     cmd_getval(cct, cmdmap, "object_num", onum, (int64_t)0);
 
-    ceph::shared_ptr<ObjectStore::Sequencer> osr(
-      new ObjectStore::Sequencer("bench"));
+    ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
+                                        ObjectStore::Sequencer>("bench"));
 
     uint32_t duration = g_conf->osd_bench_duration;
 
index 6694e27b3bb99ac7484423be9dea5a19dd3a52cd..46b51f36a1027d24d4444621017a3ea3e53f5f56 100644 (file)
@@ -270,15 +270,15 @@ private:
             flags(0),
             num_osd(0), num_up_osd(0), num_in_osd(0),
             max_osd(0),
-            osd_addrs(new addrs_s),
-            pg_temp(new map<pg_t,vector<int32_t> >),
-            primary_temp(new map<pg_t,int32_t>),
-            osd_uuid(new vector<uuid_d>),
+            osd_addrs(std::make_shared<addrs_s>()),
+            pg_temp(std::make_shared<map<pg_t,vector<int32_t>>>()),
+            primary_temp(std::make_shared<map<pg_t,int32_t>>()),
+            osd_uuid(std::make_shared<vector<uuid_d>>()),
             cluster_snapshot_epoch(0),
             new_blacklist_entries(false),
             cached_up_osd_features(0),
             crc_defined(false), crc(0),
-            crush(new CrushWrapper) {
+            crush(std::make_shared<CrushWrapper>()) {
     memset(&fsid, 0, sizeof(fsid));
   }
 
index ac5075aaa731ed605e84efa8f6eda9691db9164e..068e3b79e4be72c2081798f429ba36afe86f0836 100644 (file)
@@ -2016,7 +2016,7 @@ void PG::all_activated_and_committed()
 
   queue_peering_event(
     CephPeeringEvtRef(
-      new CephPeeringEvt(
+      std::make_shared<CephPeeringEvt>(
         get_osdmap()->get_epoch(),
         get_osdmap()->get_epoch(),
         AllReplicasActivated())));
@@ -2719,8 +2719,8 @@ void PG::upgrade(ObjectStore *store)
   dirty_big_info = true;
   write_if_dirty(t);
 
-  ceph::shared_ptr<ObjectStore::Sequencer> osr(
-    new ObjectStore::Sequencer("upgrade"));
+  ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
+                                      ObjectStore::Sequencer>("upgrade"));
   int r = store->apply_transaction(osr.get(), std::move(t));
   if (r != 0) {
     derr << __func__ << ": apply_transaction returned "
@@ -4461,7 +4461,7 @@ void PG::scrub_finish()
   if (has_error) {
     queue_peering_event(
       CephPeeringEvtRef(
-       new CephPeeringEvt(
+       std::make_shared<CephPeeringEvt>(
          get_osdmap()->get_epoch(),
          get_osdmap()->get_epoch(),
          DoRecovery())));
@@ -4770,8 +4770,8 @@ void PG::start_flush(ObjectStore::Transaction *t,
                     list<Context *> *on_safe)
 {
   // flush in progress ops
-  FlushStateRef flush_trigger(
-    new FlushState(this, get_osdmap()->get_epoch()));
+  FlushStateRef flush_trigger (std::make_shared<FlushState>(
+                               this, get_osdmap()->get_epoch()));
   t->nop();
   flushes_in_progress++;
   on_applied->push_back(new ContainerContext<FlushStateRef>(flush_trigger));
@@ -5391,7 +5391,7 @@ void PG::queue_null(epoch_t msg_epoch,
 {
   dout(10) << "null" << dendl;
   queue_peering_event(
-    CephPeeringEvtRef(new CephPeeringEvt(msg_epoch, query_epoch,
+    CephPeeringEvtRef(std::make_shared<CephPeeringEvt>(msg_epoch, query_epoch,
                                         NullEvt())));
 }
 
@@ -5399,7 +5399,7 @@ void PG::queue_flushed(epoch_t e)
 {
   dout(10) << "flushed" << dendl;
   queue_peering_event(
-    CephPeeringEvtRef(new CephPeeringEvt(e, e,
+    CephPeeringEvtRef(std::make_shared<CephPeeringEvt>(e, e,
                                         FlushedEvt())));
 }
 
@@ -5409,7 +5409,7 @@ void PG::queue_query(epoch_t msg_epoch,
 {
   dout(10) << "handle_query " << q << " from replica " << from << dendl;
   queue_peering_event(
-    CephPeeringEvtRef(new CephPeeringEvt(msg_epoch, query_epoch,
+    CephPeeringEvtRef(std::make_shared<CephPeeringEvt>(msg_epoch, query_epoch,
                                         MQuery(from, q, query_epoch))));
 }
 
index bef96d68747fbf9a1d9491588748ade15c04940d..32595dcf44f3195ce0ee70d5b7e8f7b5cdec375d 100644 (file)
@@ -1151,7 +1151,7 @@ void ReplicatedBackend::sub_op_modify_impl(OpRequestRef op)
 
   op->mark_started();
 
-  RepModifyRef rm(new RepModify);
+  RepModifyRef rm(std::make_shared<RepModify>());
   rm->op = op;
   rm->ackerosd = ackerosd;
   rm->last_complete = get_info().last_complete;
index 3cd4bd95aa9946e1746231fda8f1ea401f0274c4..15a24b9cb0cb938b035f7bb31fa939266bb372ca 100644 (file)
@@ -2389,7 +2389,7 @@ void ReplicatedPG::do_proxy_read(OpRequestRef op)
 
   dout(10) << __func__ << " Start proxy read for " << *m << dendl;
 
-  ProxyReadOpRef prdop(new ProxyReadOp(op, soid, m->ops));
+  ProxyReadOpRef prdop(std::make_shared<ProxyReadOp>(op, soid, m->ops));
 
   ObjectOperation obj_op;
   obj_op.dup(prdop->ops);
@@ -2574,7 +2574,7 @@ void ReplicatedPG::do_proxy_write(OpRequestRef op, const hobject_t& missing_oid)
   unsigned flags = CEPH_OSD_FLAG_IGNORE_CACHE | CEPH_OSD_FLAG_IGNORE_OVERLAY;
   dout(10) << __func__ << " Start proxy write for " << *m << dendl;
 
-  ProxyWriteOpRef pwop(new ProxyWriteOp(op, soid, m->ops, m->get_reqid()));
+  ProxyWriteOpRef pwop(std::make_shared<ProxyWriteOp>(op, soid, m->ops, m->get_reqid()));
   pwop->ctx = new OpContext(op, m->get_reqid(), pwop->ops, this);
   pwop->mtime = m->get_mtime();
 
@@ -3075,7 +3075,7 @@ void ReplicatedPG::do_scan(
                << ratio << ", which exceeds " << full_ratio << dendl;
        queue_peering_event(
          CephPeeringEvtRef(
-           new CephPeeringEvt(
+           std::make_shared<CephPeeringEvt>(
              get_osdmap()->get_epoch(),
              get_osdmap()->get_epoch(),
              BackfillTooFull())));
@@ -3153,7 +3153,7 @@ void ReplicatedPG::do_backfill(OpRequestRef op)
       osd->send_message_osd_cluster(reply, m->get_connection());
       queue_peering_event(
        CephPeeringEvtRef(
-         new CephPeeringEvt(
+         std::make_shared<CephPeeringEvt>(
            get_osdmap()->get_epoch(),
            get_osdmap()->get_epoch(),
            RecoveryDone())));
@@ -6957,7 +6957,7 @@ void ReplicatedPG::start_copy(CopyCallback *cb, ObjectContextRef obc,
     cancel_copy(cop, false);
   }
 
-  CopyOpRef cop(new CopyOp(cb, obc, src, oloc, version, flags,
+  CopyOpRef cop(std::make_shared<CopyOp>(cb, obc, src, oloc, version, flags,
                           mirror_snapset, src_obj_fadvise_flags,
                           dest_obj_fadvise_flags));
   copy_ops[dest] = cop;
@@ -7810,7 +7810,7 @@ int ReplicatedPG::start_flush(
       NULL /* no callback, we'll rely on the ordering w.r.t the next op */);
   }
 
-  FlushOpRef fop(new FlushOp);
+  FlushOpRef fop(std::make_shared<FlushOp>());
   fop->obc = obc;
   fop->flushed_version = oi.user_version;
   fop->blocking = blocking;
@@ -9659,7 +9659,7 @@ void ReplicatedPG::on_activate()
     dout(10) << "activate not all replicas are up-to-date, queueing recovery" << dendl;
     queue_peering_event(
       CephPeeringEvtRef(
-       new CephPeeringEvt(
+       std::make_shared<CephPeeringEvt>(
          get_osdmap()->get_epoch(),
          get_osdmap()->get_epoch(),
          DoRecovery())));
@@ -9667,7 +9667,7 @@ void ReplicatedPG::on_activate()
     dout(10) << "activate queueing backfill" << dendl;
     queue_peering_event(
       CephPeeringEvtRef(
-       new CephPeeringEvt(
+       std::make_shared<CephPeeringEvt>(
          get_osdmap()->get_epoch(),
          get_osdmap()->get_epoch(),
          RequestBackfill())));
@@ -9675,7 +9675,7 @@ void ReplicatedPG::on_activate()
     dout(10) << "activate all replicas clean, no recovery" << dendl;
     queue_peering_event(
       CephPeeringEvtRef(
-       new CephPeeringEvt(
+       std::make_shared<CephPeeringEvt>(
          get_osdmap()->get_epoch(),
          get_osdmap()->get_epoch(),
          AllReplicasRecovered())));
@@ -10027,7 +10027,7 @@ bool ReplicatedPG::start_recovery_ops(
        backfill_reserving = true;
        queue_peering_event(
          CephPeeringEvtRef(
-           new CephPeeringEvt(
+           std::make_shared<CephPeeringEvt>(
              get_osdmap()->get_epoch(),
              get_osdmap()->get_epoch(),
              RequestBackfill())));
@@ -10080,7 +10080,7 @@ bool ReplicatedPG::start_recovery_ops(
       dout(10) << "recovery done, queuing backfill" << dendl;
       queue_peering_event(
         CephPeeringEvtRef(
-          new CephPeeringEvt(
+          std::make_shared<CephPeeringEvt>(
             get_osdmap()->get_epoch(),
             get_osdmap()->get_epoch(),
             RequestBackfill())));
@@ -10088,7 +10088,7 @@ bool ReplicatedPG::start_recovery_ops(
       dout(10) << "recovery done, no backfill" << dendl;
       queue_peering_event(
         CephPeeringEvtRef(
-          new CephPeeringEvt(
+          std::make_shared<CephPeeringEvt>(
             get_osdmap()->get_epoch(),
             get_osdmap()->get_epoch(),
             AllReplicasRecovered())));
@@ -10098,7 +10098,7 @@ bool ReplicatedPG::start_recovery_ops(
     dout(10) << "recovery done, backfill done" << dendl;
     queue_peering_event(
       CephPeeringEvtRef(
-        new CephPeeringEvt(
+        std::make_shared<CephPeeringEvt>(
           get_osdmap()->get_epoch(),
           get_osdmap()->get_epoch(),
           Backfilled())));
index 0ee87a7f5e6603b5826a662968f02cc5ee39872c..00d1826af23e19ed99a7139dde9d2478eba65431 100644 (file)
@@ -167,7 +167,7 @@ public:
       results.mirror_snapset = mirror_snapset;
     }
   };
-  typedef boost::shared_ptr<CopyOp> CopyOpRef;
+  typedef ceph::shared_ptr<CopyOp> CopyOpRef;
 
   /**
    * The CopyCallback class defines an interface for completions to the
@@ -212,7 +212,7 @@ public:
        user_version(0), data_offset(0),
        canceled(false) { }
   };
-  typedef boost::shared_ptr<ProxyReadOp> ProxyReadOpRef;
+  typedef ceph::shared_ptr<ProxyReadOp> ProxyReadOpRef;
 
   struct ProxyWriteOp {
     OpContext *ctx;
@@ -234,7 +234,7 @@ public:
        sent_ack(false), canceled(false),
         reqid(_reqid) { }
   };
-  typedef boost::shared_ptr<ProxyWriteOp> ProxyWriteOpRef;
+  typedef ceph::shared_ptr<ProxyWriteOp> ProxyWriteOpRef;
 
   struct FlushOp {
     ObjectContextRef obc;       ///< obc we are flushing
@@ -253,7 +253,7 @@ public:
        on_flush(NULL) {}
     ~FlushOp() { assert(!on_flush); }
   };
-  typedef boost::shared_ptr<FlushOp> FlushOpRef;
+  typedef ceph::shared_ptr<FlushOp> FlushOpRef;
 
   boost::scoped_ptr<PGBackend> pgbackend;
   PGBackend *get_pgbackend() {