git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
cleanup: reduce the number of pointer indirections in osd 567/head
authorRoald J. van Loon <roaldvanloon@gmail.com>
Sat, 31 Aug 2013 21:10:14 +0000 (23:10 +0200)
committerRoald J. van Loon <roaldvanloon@gmail.com>
Thu, 5 Sep 2013 19:02:47 +0000 (21:02 +0200)
Signed-off-by: Roald J. van Loon <roaldvanloon@gmail.com>
src/osd/OSD.cc
src/osd/OSD.h
src/osd/PG.cc
src/osd/PG.h
src/osd/ReplicatedPG.cc

index 7917d4754d8ca84bd4c237324ccc6bf841bb7d0f..b7782471f5983dc55fc0f6661306542c6145c738 100644 (file)
@@ -156,6 +156,7 @@ static CompatSet get_osd_compat_set() {
 
 OSDService::OSDService(OSD *osd) :
   osd(osd),
+  cct(osd->cct),
   whoami(osd->whoami), store(osd->store), clog(osd->clog),
   pg_recovery_stats(osd->pg_recovery_stats),
   infos_oid(OSD::make_infos_oid()),
@@ -186,17 +187,17 @@ OSDService::OSDService(OSD *osd) :
   watch_timer(osd->client_messenger->cct, watch_lock),
   next_notif_id(0),
   backfill_request_lock("OSD::backfill_request_lock"),
-  backfill_request_timer(osd->cct, backfill_request_lock, false),
+  backfill_request_timer(cct, backfill_request_lock, false),
   last_tid(0),
   tid_lock("OSDService::tid_lock"),
-  reserver_finisher(osd->cct),
-  local_reserver(&reserver_finisher, osd->cct->_conf->osd_max_backfills),
-  remote_reserver(&reserver_finisher, osd->cct->_conf->osd_max_backfills),
+  reserver_finisher(cct),
+  local_reserver(&reserver_finisher, cct->_conf->osd_max_backfills),
+  remote_reserver(&reserver_finisher, cct->_conf->osd_max_backfills),
   pg_temp_lock("OSDService::pg_temp_lock"),
   map_cache_lock("OSDService::map_lock"),
-  map_cache(osd->cct->_conf->osd_map_cache_size),
-  map_bl_cache(osd->cct->_conf->osd_map_cache_size),
-  map_bl_inc_cache(osd->cct->_conf->osd_map_cache_size),
+  map_cache(cct->_conf->osd_map_cache_size),
+  map_bl_cache(cct->_conf->osd_map_cache_size),
+  map_bl_inc_cache(cct->_conf->osd_map_cache_size),
   in_progress_split_lock("OSDService::in_progress_split_lock"),
   full_status_lock("OSDService::full_status_lock"),
   cur_state(NONE),
@@ -2517,14 +2518,14 @@ void OSD::project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from,
 
 float OSDService::get_full_ratio()
 {
-  float full_ratio = osd->cct->_conf->osd_failsafe_full_ratio;
+  float full_ratio = cct->_conf->osd_failsafe_full_ratio;
   if (full_ratio > 1.0) full_ratio /= 100.0;
   return full_ratio;
 }
 
 float OSDService::get_nearfull_ratio()
 {
-  float nearfull_ratio = osd->cct->_conf->osd_failsafe_nearfull_ratio;
+  float nearfull_ratio = cct->_conf->osd_failsafe_nearfull_ratio;
   if (nearfull_ratio > 1.0) nearfull_ratio /= 100.0;
   return nearfull_ratio;
 }
@@ -2552,7 +2553,7 @@ void OSDService::check_nearfull_warning(const osd_stat_t &osd_stat)
 
   if (cur_state != new_state) {
     cur_state = new_state;
-  } else if (now - last_msg < osd->cct->_conf->osd_op_complaint_time) {
+  } else if (now - last_msg < cct->_conf->osd_op_complaint_time) {
     return;
   }
   last_msg = now;
@@ -2574,7 +2575,7 @@ bool OSDService::too_full_for_backfill(double *_ratio, double *_max_ratio)
 {
   Mutex::Locker l(full_status_lock);
   double max_ratio;
-  max_ratio = osd->cct->_conf->osd_backfill_full_ratio;
+  max_ratio = cct->_conf->osd_backfill_full_ratio;
   if (_ratio)
     *_ratio = cur_ratio;
   if (_max_ratio)
@@ -3214,7 +3215,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
 
     string poolstr;
 
-    cmd_getval(service->osd->cct, cmdmap, "pool", poolstr);
+    cmd_getval(service->cct, cmdmap, "pool", poolstr);
     pool = curmap->const_lookup_pg_pool_name(poolstr.c_str());
     //If we can't find it by name then maybe id specified
     if (pool < 0 && isdigit(poolstr[0]))
@@ -3225,7 +3226,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
     }
     r = -1;
     string objname, nspace;
-    cmd_getval(service->osd->cct, cmdmap, "objname", objname);
+    cmd_getval(service->cct, cmdmap, "objname", objname);
     std::size_t found = objname.find_first_of('/');
     if (found != string::npos) {
       nspace = objname.substr(0, found);
@@ -3247,8 +3248,8 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
       map<string, bufferlist> newattrs;
       bufferlist val;
       string key, valstr;
-      cmd_getval(service->osd->cct, cmdmap, "key", key);
-      cmd_getval(service->osd->cct, cmdmap, "val", valstr);
+      cmd_getval(service->cct, cmdmap, "key", key);
+      cmd_getval(service->cct, cmdmap, "val", valstr);
 
       val.append(valstr);
       newattrs[key] = val;
@@ -3261,7 +3262,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
     } else if (command == "rmomapkey") {
       string key;
       set<string> keys;
-      cmd_getval(service->osd->cct, cmdmap, "key", key);
+      cmd_getval(service->cct, cmdmap, "key", key);
 
       keys.insert(key);
       t.omap_rmkeys(coll_t(pgid), obj, keys);
@@ -3274,7 +3275,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
       bufferlist newheader;
       string headerstr;
 
-      cmd_getval(service->osd->cct, cmdmap, "header", headerstr);
+      cmd_getval(service->cct, cmdmap, "header", headerstr);
       newheader.append(headerstr);
       t.omap_setheader(coll_t(pgid), obj, newheader);
       r = store->apply_transaction(t);
@@ -3298,7 +3299,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
       }
     } else if (command == "truncobj") {
       int64_t trunclen;
-      cmd_getval(service->osd->cct, cmdmap, "len", trunclen);
+      cmd_getval(service->cct, cmdmap, "len", trunclen);
       t.truncate(coll_t(pgid), obj, trunclen);
       r = store->apply_transaction(t);
       if (r < 0)
@@ -3388,12 +3389,12 @@ void OSD::RemoveWQ::_process(pair<PGRef, DeletingStateRef> item)
 
   if (pg->have_temp_coll()) {
     bool cont = remove_dir(
-      pg->osd->osd->cct, store, &mapper, &driver, pg->osr.get(), pg->get_temp_coll(), item.second);
+      pg->cct, store, &mapper, &driver, pg->osr.get(), pg->get_temp_coll(), item.second);
     if (!cont)
       return;
   }
   bool cont = remove_dir(
-      pg->osd->osd->cct, store, &mapper, &driver, pg->osr.get(), coll, item.second);
+      pg->cct, store, &mapper, &driver, pg->osr.get(), coll, item.second);
   if (!cont)
     return;
 
@@ -4871,13 +4872,13 @@ bool OSDService::inc_scrubs_pending()
   bool result = false;
 
   sched_scrub_lock.Lock();
-  if (scrubs_pending + scrubs_active < osd->cct->_conf->osd_max_scrubs) {
+  if (scrubs_pending + scrubs_active < cct->_conf->osd_max_scrubs) {
     dout(20) << "inc_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending+1)
-            << " (max " << osd->cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
+            << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
     result = true;
     ++scrubs_pending;
   } else {
-    dout(20) << "inc_scrubs_pending " << scrubs_pending << " + " << scrubs_active << " active >= max " << osd->cct->_conf->osd_max_scrubs << dendl;
+    dout(20) << "inc_scrubs_pending " << scrubs_pending << " + " << scrubs_active << " active >= max " << cct->_conf->osd_max_scrubs << dendl;
   }
   sched_scrub_lock.Unlock();
 
@@ -4888,7 +4889,7 @@ void OSDService::dec_scrubs_pending()
 {
   sched_scrub_lock.Lock();
   dout(20) << "dec_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending-1)
-          << " (max " << osd->cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
+          << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
   --scrubs_pending;
   assert(scrubs_pending >= 0);
   sched_scrub_lock.Unlock();
@@ -4901,12 +4902,12 @@ void OSDService::inc_scrubs_active(bool reserved)
   if (reserved) {
     --(scrubs_pending);
     dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
-            << " (max " << osd->cct->_conf->osd_max_scrubs
+            << " (max " << cct->_conf->osd_max_scrubs
             << ", pending " << (scrubs_pending+1) << " -> " << scrubs_pending << ")" << dendl;
     assert(scrubs_pending >= 0);
   } else {
     dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
-            << " (max " << osd->cct->_conf->osd_max_scrubs
+            << " (max " << cct->_conf->osd_max_scrubs
             << ", pending " << scrubs_pending << ")" << dendl;
   }
   sched_scrub_lock.Unlock();
@@ -4916,7 +4917,7 @@ void OSDService::dec_scrubs_active()
 {
   sched_scrub_lock.Lock();
   dout(20) << "dec_scrubs_active " << scrubs_active << " -> " << (scrubs_active-1)
-          << " (max " << osd->cct->_conf->osd_max_scrubs << ", pending " << scrubs_pending << ")" << dendl;
+          << " (max " << cct->_conf->osd_max_scrubs << ", pending " << scrubs_pending << ")" << dendl;
   --scrubs_active;
   sched_scrub_lock.Unlock();
 }
@@ -4935,10 +4936,10 @@ bool OSDService::prepare_to_stop()
                                              osdmap->get_epoch(),
                                              false
                                              ));
-    utime_t now = ceph_clock_now(osd->cct);
+    utime_t now = ceph_clock_now(cct);
     utime_t timeout;
-    timeout.set_from_double(now + osd->cct->_conf->osd_mon_shutdown_timeout);
-    while ((ceph_clock_now(osd->cct) < timeout) &&
+    timeout.set_from_double(now + cct->_conf->osd_mon_shutdown_timeout);
+    while ((ceph_clock_now(cct) < timeout) &&
           (state != STOPPING)) {
       is_stopping_cond.WaitUntil(is_stopping_lock, timeout);
     }
@@ -5706,7 +5707,7 @@ OSDMapRef OSDService::_add_map(OSDMap *o)
 {
   epoch_t e = o->get_epoch();
 
-  if (osd->cct->_conf->osd_map_dedup) {
+  if (cct->_conf->osd_map_dedup) {
     // Dedup against an existing map at a nearby epoch
     OSDMapRef for_dedup = map_cache.lower_bound(e);
     if (for_dedup) {
index 882be9631adad5db07c19ffb2bb2a778079af75d..e537249e8d668af6ce420b0436de3c72ed59960f 100644 (file)
@@ -27,6 +27,7 @@
 #include "common/WorkQueue.h"
 #include "common/LogClient.h"
 #include "common/AsyncReserver.h"
+#include "common/ceph_context.h"
 
 #include "os/ObjectStore.h"
 #include "OSDCap.h"
@@ -282,6 +283,7 @@ class OSD;
 class OSDService {
 public:
   OSD *osd;
+  CephContext *cct;
   SharedPtrRegistry<pg_t, ObjectStore::Sequencer> osr_registry;
   SharedPtrRegistry<pg_t, DeletingState> deleting_pgs;
   const int whoami;
index a5598a52812e325c7e35ec7636bb1183b4a211f3..e2fb09f6b5a506f596c9dc49e99b2ed8e7c4ddb1 100644 (file)
@@ -143,6 +143,7 @@ PG::PG(OSDService *o, OSDMapRef curmap,
        const PGPool &_pool, pg_t p, const hobject_t& loid,
        const hobject_t& ioid) :
   osd(o),
+  cct(o->cct),
   osdriver(osd->store, coll_t(), OSD::make_snapmapper_oid()),
   snap_mapper(
     &osdriver,
@@ -159,7 +160,7 @@ PG::PG(OSDService *o, OSDMapRef curmap,
   deleting(false), dirty_info(false), dirty_big_info(false),
   info(p),
   info_struct_v(0),
-  coll(p), pg_log(o->osd->cct), log_oid(loid), biginfo_oid(ioid),
+  coll(p), pg_log(cct), log_oid(loid), biginfo_oid(ioid),
   recovery_item(this), scrub_item(this), scrub_finalize_item(this), snap_trim_item(this), stat_queue_item(this),
   recovery_ops_active(0),
   waiting_on_backfill(0),
@@ -1095,7 +1096,7 @@ void PG::activate(ObjectStore::Transaction& t,
   if (is_primary() &&
       pool.info.crash_replay_interval > 0 &&
       may_need_replay(get_osdmap())) {
-    replay_until = ceph_clock_now(osd->osd->cct);
+    replay_until = ceph_clock_now(cct);
     replay_until += pool.info.crash_replay_interval;
     dout(10) << "activate starting replay interval for " << pool.info.crash_replay_interval
             << " until " << replay_until << dendl;
@@ -1224,7 +1225,7 @@ void PG::activate(ObjectStore::Transaction& t,
        m = new MOSDPGLog(get_osdmap()->get_epoch(), pi);
 
        // send some recent log, so that op dup detection works well.
-       m->log.copy_up_to(pg_log.get_log(), osd->osd->cct->_conf->osd_min_pg_log_entries);
+       m->log.copy_up_to(pg_log.get_log(), cct->_conf->osd_min_pg_log_entries);
        m->info.log_tail = m->log.tail;
        pi.log_tail = m->log.tail;  // sigh...
 
@@ -1919,7 +1920,7 @@ void PG::publish_stats_to_osd()
     else
       state_clear(PG_STATE_INCONSISTENT);
 
-    utime_t now = ceph_clock_now(osd->osd->cct);
+    utime_t now = ceph_clock_now(cct);
     info.stats.last_fresh = now;
     if (info.stats.state != state) {
       info.stats.state = state;
@@ -2626,8 +2627,8 @@ bool PG::sched_scrub()
     return false;
   }
 
-  bool time_for_deep = (ceph_clock_now(osd->osd->cct) >
-    info.history.last_deep_scrub_stamp + osd->osd->cct->_conf->osd_deep_scrub_interval);
+  bool time_for_deep = (ceph_clock_now(cct) >
+    info.history.last_deep_scrub_stamp + cct->_conf->osd_deep_scrub_interval);
  
   //NODEEP_SCRUB so ignore time initiated deep-scrub
   if (osd->osd->get_osdmap()->test_flag(CEPH_OSDMAP_NODEEP_SCRUB))
@@ -2779,7 +2780,7 @@ void PG::_scan_list(
         int r;
         __u64 pos = 0;
         while ( (r = osd->store->read(coll, poid, pos,
-                                       osd->osd->cct->_conf->osd_deep_scrub_stride, bl,
+                                       cct->_conf->osd_deep_scrub_stride, bl,
                                      true)) > 0) {
          handle.reset_tp_timeout();
           h << bl;
@@ -2813,8 +2814,8 @@ void PG::_scan_list(
         assert(iter);
        uint64_t keys_scanned = 0;
         for (iter->seek_to_first(); iter->valid() ; iter->next()) {
-         if (osd->osd->cct->_conf->osd_scan_list_ping_tp_interval &&
-             (keys_scanned % osd->osd->cct->_conf->osd_scan_list_ping_tp_interval == 0)) {
+         if (cct->_conf->osd_scan_list_ping_tp_interval &&
+             (keys_scanned % cct->_conf->osd_scan_list_ping_tp_interval == 0)) {
            handle.reset_tp_timeout();
          }
          ++keys_scanned;
@@ -2970,7 +2971,7 @@ void PG::schedule_backfill_full_retry()
 {
   Mutex::Locker lock(osd->backfill_request_lock);
   osd->backfill_request_timer.add_event_after(
-    osd->osd->cct->_conf->osd_backfill_retry_interval,
+    cct->_conf->osd_backfill_retry_interval,
     new QueuePeeringEvt<RequestBackfill>(
       this, get_osdmap()->get_epoch(),
       RequestBackfill()));
@@ -3630,8 +3631,8 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle)
           while (!boundary_found) {
             vector<hobject_t> objects;
             ret = osd->store->collection_list_partial(coll, start,
-                                                      osd->osd->cct->_conf->osd_scrub_chunk_min,
-                                                     osd->osd->cct->_conf->osd_scrub_chunk_max,
+                                                      cct->_conf->osd_scrub_chunk_min,
+                                                     cct->_conf->osd_scrub_chunk_max,
                                                      0,
                                                       &objects, &scrubber.end);
             assert(ret >= 0);
@@ -4214,7 +4215,7 @@ void PG::scrub_finish()
 
   // finish up
   unreg_next_scrub();
-  utime_t now = ceph_clock_now(osd->osd->cct);
+  utime_t now = ceph_clock_now(cct);
   info.history.last_scrub = info.last_update;
   info.history.last_scrub_stamp = now;
   if (scrubber.deep) {
@@ -4688,7 +4689,7 @@ void PG::proc_primary_info(ObjectStore::Transaction &t, const pg_info_t &oinfo)
 
   if (last_complete_ondisk.epoch >= info.history.last_epoch_started) {
     // DEBUG: verify that the snaps are empty in snap_mapper
-    if (osd->osd->cct->_conf->osd_debug_verify_snaps_on_info) {
+    if (cct->_conf->osd_debug_verify_snaps_on_info) {
       interval_set<snapid_t> p;
       p.union_of(oinfo.purged_snaps, info.purged_snaps);
       p.subtract(info.purged_snaps);
@@ -5063,7 +5064,7 @@ void PG::handle_activate_map(RecoveryCtx *rctx)
   ActMap evt;
   recovery_state.handle_event(evt, rctx);
   if (osdmap_ref->get_epoch() - last_persisted_osdmap_ref->get_epoch() >
-    osd->osd->cct->_conf->osd_pg_epoch_persisted_max_stale) {
+    cct->_conf->osd_pg_epoch_persisted_max_stale) {
     dout(20) << __func__ << ": Dirtying info: last_persisted is "
             << last_persisted_osdmap_ref->get_epoch()
             << " while current is " << osdmap_ref->get_epoch() << dendl;
@@ -5172,7 +5173,7 @@ void PG::RecoveryState::Initial::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_initial_latency, dur);
 }
 
@@ -5221,7 +5222,7 @@ void PG::RecoveryState::Started::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_started_latency, dur);
 }
 
@@ -5296,7 +5297,7 @@ void PG::RecoveryState::Reset::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_reset_latency, dur);
 }
 
@@ -5321,7 +5322,7 @@ void PG::RecoveryState::Start::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_start_latency, dur);
 }
 
@@ -5370,7 +5371,7 @@ void PG::RecoveryState::Primary::exit()
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
   pg->want_acting.clear();
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_primary_latency, dur);
 }
 
@@ -5458,7 +5459,7 @@ void PG::RecoveryState::Peering::exit()
   pg->state_clear(PG_STATE_PEERING);
   pg->clear_probe_targets();
 
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_peering_latency, dur);
 }
 
@@ -5496,7 +5497,7 @@ void PG::RecoveryState::Backfilling::exit()
   pg->backfill_reserved = false;
   pg->backfill_reserving = false;
   pg->state_clear(PG_STATE_BACKFILL);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_backfilling_latency, dur);
 }
 
@@ -5531,7 +5532,7 @@ void PG::RecoveryState::WaitRemoteBackfillReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitremotebackfillreserved_latency, dur);
 }
 
@@ -5576,7 +5577,7 @@ void PG::RecoveryState::WaitLocalBackfillReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitlocalbackfillreserved_latency, dur);
 }
 
@@ -5592,7 +5593,7 @@ void PG::RecoveryState::NotBackfilling::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_notbackfilling_latency, dur);
 }
 
@@ -5608,7 +5609,7 @@ void PG::RecoveryState::RepNotRecovering::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_repnotrecovering_latency, dur);
 }
 
@@ -5645,7 +5646,7 @@ void PG::RecoveryState::RepWaitRecoveryReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_repwaitrecoveryreserved_latency, dur);
 }
 
@@ -5664,7 +5665,7 @@ PG::RecoveryState::RepNotRecovering::react(const RequestBackfillPrio &evt)
 
   double ratio, max_ratio;
   if (pg->osd->too_full_for_backfill(&ratio, &max_ratio) &&
-      !pg->osd->osd->cct->_conf->osd_debug_skip_full_check_in_backfill_reservation) {
+      !pg->cct->_conf->osd_debug_skip_full_check_in_backfill_reservation) {
     dout(10) << "backfill reservation rejected: full ratio is "
             << ratio << ", which is greater than max allowed ratio "
             << max_ratio << dendl;
@@ -5683,7 +5684,7 @@ void PG::RecoveryState::RepWaitBackfillReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_repwaitbackfillreserved_latency, dur);
 }
 
@@ -5730,7 +5731,7 @@ void PG::RecoveryState::RepRecovering::exit()
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
   pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_RepRecovering_latency, dur);
 }
 
@@ -5746,7 +5747,7 @@ void PG::RecoveryState::Activating::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_activating_latency, dur);
 }
 
@@ -5768,7 +5769,7 @@ void PG::RecoveryState::WaitLocalRecoveryReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitlocalrecoveryreserved_latency, dur);
 }
 
@@ -5815,7 +5816,7 @@ void PG::RecoveryState::WaitRemoteRecoveryReserved::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitremoterecoveryreserved_latency, dur);
 }
 
@@ -5878,7 +5879,7 @@ void PG::RecoveryState::Recovering::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_recovering_latency, dur);
 }
 
@@ -5912,7 +5913,7 @@ void PG::RecoveryState::Recovered::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_recovered_latency, dur);
 }
 
@@ -5940,7 +5941,7 @@ void PG::RecoveryState::Clean::exit()
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
   pg->state_clear(PG_STATE_CLEAN);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_clean_latency, dur);
 }
 
@@ -6005,7 +6006,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap)
   }
 
   // if we haven't reported our PG stats in a long time, do so now.
-  if (pg->info.stats.reported_epoch + pg->osd->osd->cct->_conf->osd_pg_stat_report_interval_max < advmap.osdmap->get_epoch()) {
+  if (pg->info.stats.reported_epoch + pg->cct->_conf->osd_pg_stat_report_interval_max < advmap.osdmap->get_epoch()) {
     dout(20) << "reporting stats to osd after " << (advmap.osdmap->get_epoch() - pg->info.stats.reported_epoch)
             << " epochs" << dendl;
     pg->publish_stats_to_osd();
@@ -6026,13 +6027,13 @@ boost::statechart::result PG::RecoveryState::Active::react(const ActMap&)
     pg->discover_all_missing(*context< RecoveryMachine >().get_query_map());
   }
 
-  if (pg->osd->osd->cct->_conf->osd_check_for_log_corruption)
+  if (pg->cct->_conf->osd_check_for_log_corruption)
     pg->check_log_for_corruption(pg->osd->store);
 
   int unfound = pg->pg_log.get_missing().num_missing() - pg->missing_loc.size();
   if (unfound > 0 &&
       pg->all_unfound_are_queried_or_lost(pg->get_osdmap())) {
-    if (pg->osd->osd->cct->_conf->osd_auto_mark_unfound_lost) {
+    if (pg->cct->_conf->osd_auto_mark_unfound_lost) {
       pg->osd->clog.error() << pg->info.pgid << " has " << unfound
                            << " objects unfound and apparently lost, would automatically marking lost but NOT IMPLEMENTED\n";
       //pg->mark_all_unfound_lost(*context< RecoveryMachine >().get_cur_transaction());
@@ -6191,7 +6192,7 @@ void PG::RecoveryState::Active::exit()
   pg->state_clear(PG_STATE_BACKFILL_WAIT);
   pg->state_clear(PG_STATE_RECOVERY_WAIT);
   pg->state_clear(PG_STATE_REPLAY);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_active_latency, dur);
 }
 
@@ -6284,7 +6285,7 @@ void PG::RecoveryState::ReplicaActive::exit()
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
   pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_replicaactive_latency, dur);
 }
 
@@ -6386,7 +6387,7 @@ void PG::RecoveryState::Stray::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_stray_latency, dur);
 }
 
@@ -6561,7 +6562,7 @@ void PG::RecoveryState::GetInfo::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_getinfo_latency, dur);
 }
 
@@ -6676,7 +6677,7 @@ void PG::RecoveryState::GetLog::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_getlog_latency, dur);
 }
 
@@ -6736,7 +6737,7 @@ void PG::RecoveryState::WaitActingChange::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitactingchange_latency, dur);
 }
 
@@ -6773,7 +6774,7 @@ void PG::RecoveryState::Incomplete::exit()
   PG *pg = context< RecoveryMachine >().pg;
 
   pg->state_clear(PG_STATE_INCOMPLETE);
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_incomplete_latency, dur);
 }
 
@@ -6898,7 +6899,7 @@ void PG::RecoveryState::GetMissing::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_getmissing_latency, dur);
 }
 
@@ -6979,7 +6980,7 @@ void PG::RecoveryState::WaitUpThru::exit()
 {
   context< RecoveryMachine >().log_exit(state_name, enter_time);
   PG *pg = context< RecoveryMachine >().pg;
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   pg->osd->recoverystate_perf->tinc(rs_waitupthru_latency, dur);
 }
 
@@ -6995,9 +6996,9 @@ void PG::RecoveryState::RecoveryMachine::log_enter(const char *state_name)
 
 void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time)
 {
-  utime_t dur = ceph_clock_now(pg->osd->osd->cct) - enter_time;
+  utime_t dur = ceph_clock_now(pg->cct) - enter_time;
   dout(5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
-  pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now(pg->osd->osd->cct) - enter_time,
+  pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now(pg->cct) - enter_time,
                                      event_count, event_time);
   event_count = 0;
   event_time = utime_t();
@@ -7198,12 +7199,12 @@ void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) {
   assert(!rctx);
   rctx = new_ctx;
   if (rctx)
-    rctx->start_time = ceph_clock_now(pg->osd->osd->cct);
+    rctx->start_time = ceph_clock_now(pg->cct);
 }
 
 void PG::RecoveryState::end_handle() {
   if (rctx) {
-    utime_t dur = ceph_clock_now(pg->osd->osd->cct) - rctx->start_time;
+    utime_t dur = ceph_clock_now(pg->cct) - rctx->start_time;
     machine.event_time += dur;
   }
   machine.event_count++;
index 6b0712cbaa57b15a7c64bef3e1f8a9261593458d..7fef58adbd19b9194ad9f1e71106cb34aee1c2af 100644 (file)
@@ -46,6 +46,7 @@
 #include "common/cmdparse.h"
 #include "common/tracked_int_ptr.hpp"
 #include "common/WorkQueue.h"
+#include "common/ceph_context.h"
 #include "include/str_list.h"
 
 #include <list>
@@ -189,6 +190,7 @@ public:
   /*** PG ****/
 protected:
   OSDService *osd;
+  CephContext *cct;
   OSDriver osdriver;
   SnapMapper snap_mapper;
 public:
index 984602a46d5d5ce6df792471286a1e8182af3cd0..e1f8b0cf81e5ebc346ebed9b65843be14ec9c776 100644 (file)
@@ -126,8 +126,8 @@ void ReplicatedPG::wait_for_missing_object(const hobject_t& soid, OpRequestRef o
   else {
     dout(7) << "missing " << soid << " v " << v << ", pulling." << dendl;
     map<int, vector<PullOp> > pulls;
-    prepare_pull(soid, v, osd->osd->cct->_conf->osd_client_op_priority, &pulls);
-    send_pulls(osd->osd->cct->_conf->osd_client_op_priority, pulls);
+    prepare_pull(soid, v, cct->_conf->osd_client_op_priority, &pulls);
+    send_pulls(cct->_conf->osd_client_op_priority, pulls);
   }
   waiting_for_missing_object[soid].push_back(op);
   op->mark_delayed("waiting for missing object");
@@ -184,8 +184,8 @@ void ReplicatedPG::wait_for_degraded_object(const hobject_t& soid, OpRequestRef
       }
     }
     map<int, vector<PushOp> > pushes;
-    prep_object_replica_pushes(soid, v, osd->osd->cct->_conf->osd_client_op_priority, &pushes);
-    send_pushes(osd->osd->cct->_conf->osd_client_op_priority, pushes);
+    prep_object_replica_pushes(soid, v, cct->_conf->osd_client_op_priority, &pushes);
+    send_pushes(cct->_conf->osd_client_op_priority, pushes);
   }
   waiting_for_degraded_object[soid].push_back(op);
   op->mark_delayed("waiting for degraded object");
@@ -280,14 +280,14 @@ int ReplicatedPG::do_command(cmdmap_t cmdmap, ostream& ss,
   string prefix;
   string format;
 
-  cmd_getval(osd->osd->cct, cmdmap, "format", format);
+  cmd_getval(cct, cmdmap, "format", format);
   boost::scoped_ptr<Formatter> f(new_formatter(format));
   // demand that we have a formatter
   if (!f)
     f.reset(new_formatter("json"));
 
   string command;
-  cmd_getval(osd->osd->cct, cmdmap, "cmd", command);
+  cmd_getval(cct, cmdmap, "cmd", command);
   if (command == "query") {
     f->open_object_section("pg");
     f->dump_string("state", pg_state_string(get_state()));
@@ -314,7 +314,7 @@ int ReplicatedPG::do_command(cmdmap_t cmdmap, ostream& ss,
   }
   else if (command == "mark_unfound_lost") {
     string mulcmd;
-    cmd_getval(osd->osd->cct, cmdmap, "mulcmd", mulcmd);
+    cmd_getval(cct, cmdmap, "mulcmd", mulcmd);
     if (mulcmd != "revert") {
       ss << "mode must be 'revert'; mark and delete not yet implemented";
       return -EINVAL;
@@ -345,7 +345,7 @@ int ReplicatedPG::do_command(cmdmap_t cmdmap, ostream& ss,
   else if (command == "list_missing") {
     hobject_t offset;
     string offset_json;
-    if (cmd_getval(osd->osd->cct, cmdmap, "offset", offset_json)) {
+    if (cmd_getval(cct, cmdmap, "offset", offset_json)) {
       json_spirit::Value v;
       try {
        if (!json_spirit::read(offset_json, v))
@@ -369,7 +369,7 @@ int ReplicatedPG::do_command(cmdmap_t cmdmap, ostream& ss,
       f->open_array_section("objects");
       int32_t num = 0;
       bufferlist bl;
-      while (p != missing.missing.end() && num < osd->osd->cct->_conf->osd_command_max_records) {
+      while (p != missing.missing.end() && num < cct->_conf->osd_command_max_records) {
        f->open_object_section("object");
        {
          f->open_object_section("oid");
@@ -459,7 +459,7 @@ void ReplicatedPG::do_pg_op(OpRequestRef op)
         dout(10) << " pgls pg=" << m->get_pg() << " != " << info.pgid << dendl;
        result = 0; // hmm?
       } else {
-       unsigned list_size = MIN(osd->osd->cct->_conf->osd_max_pgls, p->op.pgls.count);
+       unsigned list_size = MIN(cct->_conf->osd_max_pgls, p->op.pgls.count);
 
         dout(10) << " pgls pg=" << m->get_pg() << " count " << list_size << dendl;
        // read into a buffer
@@ -584,14 +584,14 @@ void ReplicatedPG::calc_trim_to()
     return;
   }
 
-  size_t target = osd->osd->cct->_conf->osd_min_pg_log_entries;
+  size_t target = cct->_conf->osd_min_pg_log_entries;
   if (is_degraded() ||
       state_test(PG_STATE_RECOVERING |
                 PG_STATE_RECOVERY_WAIT |
                 PG_STATE_BACKFILL |
                 PG_STATE_BACKFILL_WAIT |
                 PG_STATE_BACKFILL_TOOFULL)) {
-    target = osd->osd->cct->_conf->osd_max_pg_log_entries;
+    target = cct->_conf->osd_max_pg_log_entries;
   }
 
   if (min_last_complete_ondisk != eversion_t() &&
@@ -974,7 +974,7 @@ void ReplicatedPG::execute_ctx(OpContext *ctx)
   ctx->user_at_version = obc->obs.oi.user_version;
 
   // note my stats
-  utime_t now = ceph_clock_now(osd->osd->cct);
+  utime_t now = ceph_clock_now(cct);
 
   // note some basic context for op replication that prepare_transaction may clobber
   eversion_t old_last_update = pg_log.get_head();
@@ -1075,7 +1075,7 @@ void ReplicatedPG::execute_ctx(OpContext *ctx)
   append_log(ctx->log, pg_trim_to, ctx->local_t);
   
   // verify that we are doing this in order?
-  if (osd->osd->cct->_conf->osd_debug_op_order && m->get_source().is_client()) {
+  if (cct->_conf->osd_debug_op_order && m->get_source().is_client()) {
     map<client_t,tid_t>& cm = debug_op_order[obc->obs.oi.soid];
     tid_t t = m->get_tid();
     client_t n = m->get_source().num();
@@ -1123,7 +1123,7 @@ void ReplicatedPG::log_op_stats(OpContext *ctx)
   OpRequestRef op = ctx->op;
   MOSDOp *m = static_cast<MOSDOp*>(op->request);
 
-  utime_t now = ceph_clock_now(osd->osd->cct);
+  utime_t now = ceph_clock_now(cct);
   utime_t latency = now;
   latency -= ctx->op->request->get_recv_stamp();
 
@@ -1169,7 +1169,7 @@ void ReplicatedPG::log_op_stats(OpContext *ctx)
 
 void ReplicatedPG::log_subop_stats(OpRequestRef op, int tag_inb, int tag_lat)
 {
-  utime_t now = ceph_clock_now(osd->osd->cct);
+  utime_t now = ceph_clock_now(cct);
   utime_t latency = now;
   latency -= op->request->get_recv_stamp();
 
@@ -1286,8 +1286,8 @@ void ReplicatedPG::do_scan(
       BackfillInterval bi;
       osr->flush();
       scan_range(
-       m->begin, osd->osd->cct->_conf->osd_backfill_scan_min,
-       osd->osd->cct->_conf->osd_backfill_scan_max, &bi, handle);
+       m->begin, cct->_conf->osd_backfill_scan_min,
+       cct->_conf->osd_backfill_scan_max, &bi, handle);
       MOSDPGScan *reply = new MOSDPGScan(MOSDPGScan::OP_SCAN_DIGEST,
                                         get_osdmap()->get_epoch(), m->query_epoch,
                                         info.pgid, bi.begin, bi.end);
@@ -1353,7 +1353,7 @@ void ReplicatedPG::_do_push(OpRequestRef op)
   reply->pgid = info.pgid;
   reply->map_epoch = m->map_epoch;
   reply->replies.swap(replies);
-  reply->compute_cost(osd->osd->cct);
+  reply->compute_cost(cct);
 
   t->register_on_complete(new C_OSD_SendMessageOnConn(
                            osd, reply, m->get_connection()));
@@ -1384,7 +1384,7 @@ void ReplicatedPG::_do_pull_response(OpRequestRef op)
     reply->pgid = info.pgid;
     reply->map_epoch = m->map_epoch;
     reply->pulls.swap(replies);
-    reply->compute_cost(osd->osd->cct);
+    reply->compute_cost(cct);
 
     t->register_on_complete(new C_OSD_SendMessageOnConn(
                              osd, reply, m->get_connection()));
@@ -1442,12 +1442,12 @@ void ReplicatedPG::do_backfill(OpRequestRef op)
   case MOSDPGBackfill::OP_BACKFILL_FINISH:
     {
       assert(is_replica());
-      assert(osd->osd->cct->_conf->osd_kill_backfill_at != 1);
+      assert(cct->_conf->osd_kill_backfill_at != 1);
 
       MOSDPGBackfill *reply = new MOSDPGBackfill(MOSDPGBackfill::OP_BACKFILL_FINISH_ACK,
                                                 get_osdmap()->get_epoch(), m->query_epoch,
                                                 info.pgid);
-      reply->set_priority(osd->osd->cct->_conf->osd_recovery_op_priority);
+      reply->set_priority(cct->_conf->osd_recovery_op_priority);
       osd->send_message_osd_cluster(reply, m->get_connection());
       queue_peering_event(
        CephPeeringEvtRef(
@@ -1461,7 +1461,7 @@ void ReplicatedPG::do_backfill(OpRequestRef op)
   case MOSDPGBackfill::OP_BACKFILL_PROGRESS:
     {
       assert(is_replica());
-      assert(osd->osd->cct->_conf->osd_kill_backfill_at != 2);
+      assert(cct->_conf->osd_kill_backfill_at != 2);
 
       info.last_backfill = m->last_backfill;
       if (m->compat_stat_sum) {
@@ -1481,7 +1481,7 @@ void ReplicatedPG::do_backfill(OpRequestRef op)
   case MOSDPGBackfill::OP_BACKFILL_FINISH_ACK:
     {
       assert(is_primary());
-      assert(osd->osd->cct->_conf->osd_kill_backfill_at != 3);
+      assert(cct->_conf->osd_kill_backfill_at != 3);
       finish_recovery_op(hobject_t::get_max());
     }
     break;
@@ -1530,7 +1530,7 @@ ReplicatedPG::RepGather *ReplicatedPG::trim_object(const hobject_t &coid)
     &obc->obs,
     obc->ssc,
     this);
-  ctx->mtime = ceph_clock_now(osd->osd->cct);
+  ctx->mtime = ceph_clock_now(cct);
 
   ctx->at_version.epoch = get_osdmap()->get_epoch();
   ctx->at_version.version = pg_log.get_head().version + 1;
@@ -2115,9 +2115,9 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     // munge ZERO -> TRUNCATE?  (don't munge to DELETE or we risk hosing attributes)
     if (op.op == CEPH_OSD_OP_ZERO &&
        obs.exists &&
-       op.extent.offset < osd->osd->cct->_conf->osd_max_object_size &&
+       op.extent.offset < cct->_conf->osd_max_object_size &&
        op.extent.length >= 1 &&
-       op.extent.length <= osd->osd->cct->_conf->osd_max_object_size &&
+       op.extent.length <= cct->_conf->osd_max_object_size &&
        op.extent.offset + op.extent.length >= oi.size) {
       if (op.extent.offset >= oi.size) {
         // no-op
@@ -2217,7 +2217,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
        uint64_t last = op.extent.offset;
         for (miter = m.begin(); miter != m.end(); ++miter) {
          // verify hole?
-         if (osd->osd->cct->_conf->osd_verify_sparse_read_holes &&
+         if (cct->_conf->osd_verify_sparse_read_holes &&
              last < miter->first) {
            bufferlist t;
            uint64_t len = miter->first - last;
@@ -2242,7 +2242,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
         }
 
        // verify trailing hole?
-       if (osd->osd->cct->_conf->osd_verify_sparse_read_holes) {
+       if (cct->_conf->osd_verify_sparse_read_holes) {
          uint64_t end = MIN(op.extent.offset + op.extent.length, oi.size);
          if (last < end) {
            bufferlist t;
@@ -2596,7 +2596,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          timeout = 0;
        }
        if (!timeout)
-         timeout = osd->osd->cct->_conf->osd_default_notify_timeout;
+         timeout = cct->_conf->osd_default_notify_timeout;
 
        notify_info_t n;
        n.timeout = timeout;
@@ -2666,7 +2666,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
            oi.truncate_size = op.extent.truncate_size;
          }
        }
-       result = check_offset_and_length(op.extent.offset, op.extent.length, osd->osd->cct->_conf->osd_max_object_size);
+       result = check_offset_and_length(op.extent.offset, op.extent.length, cct->_conf->osd_max_object_size);
        if (result < 0)
          break;
        t.write(coll, soid, op.extent.offset, op.extent.length, osd_op.indata);
@@ -2686,7 +2686,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          result = -EINVAL;
          break;
        }
-       result = check_offset_and_length(op.extent.offset, op.extent.length, osd->osd->cct->_conf->osd_max_object_size);
+       result = check_offset_and_length(op.extent.offset, op.extent.length, cct->_conf->osd_max_object_size);
        if (result < 0)
          break;
        if (obs.exists) {
@@ -2718,7 +2718,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     case CEPH_OSD_OP_ZERO:
       ++ctx->num_write;
       { // zero
-       result = check_offset_and_length(op.extent.offset, op.extent.length, osd->osd->cct->_conf->osd_max_object_size);
+       result = check_offset_and_length(op.extent.offset, op.extent.length, cct->_conf->osd_max_object_size);
        if (result < 0)
          break;
        assert(op.extent.length);
@@ -2781,7 +2781,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          break;
        }
 
-       if (op.extent.offset > osd->osd->cct->_conf->osd_max_object_size) {
+       if (op.extent.offset > cct->_conf->osd_max_object_size) {
          result = -EFBIG;
          break;
        }
@@ -2865,7 +2865,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
        dout(10) << "watch: peer_addr="
          << ctx->op->request->get_connection()->get_peer_addr() << dendl;
 
-       watch_info_t w(cookie, osd->osd->cct->_conf->osd_client_watch_timeout,
+       watch_info_t w(cookie, cct->_conf->osd_client_watch_timeout,
          ctx->op->request->get_connection()->get_peer_addr());
        if (do_watch) {
          if (oi.watchers.count(make_pair(cookie, entity))) {
@@ -2898,7 +2898,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     case CEPH_OSD_OP_SETXATTR:
       ++ctx->num_write;
       {
-       if (op.xattr.value_len > osd->osd->cct->_conf->osd_max_attr_size) {
+       if (op.xattr.value_len > cct->_conf->osd_max_attr_size) {
          result = -EFBIG;
          break;
        }
@@ -2996,8 +2996,8 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          }
        }
 
-       if (osd->osd->cct->_conf->osd_tmapput_sets_uses_tmap) {
-         assert(osd->osd->cct->_conf->osd_auto_upgrade_tmap);
+       if (cct->_conf->osd_tmapput_sets_uses_tmap) {
+         assert(cct->_conf->osd_auto_upgrade_tmap);
          oi.uses_tmap = true;
        }
 
@@ -3046,7 +3046,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
        }
        set<string> out_set;
 
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          dout(20) << "CEPH_OSD_OP_OMAPGETKEYS: "
                   << " Reading " << oi.soid << " omap from tmap" << dendl;
          map<string, bufferlist> vals;
@@ -3104,7 +3104,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
        }
        map<string, bufferlist> out_set;
 
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          dout(20) << "CEPH_OSD_OP_OMAPGETVALS: "
                   << " Reading " << oi.soid << " omap from tmap" << dendl;
          map<string, bufferlist> vals;
@@ -3155,7 +3155,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     case CEPH_OSD_OP_OMAPGETHEADER:
       ++ctx->num_read;
       {
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          dout(20) << "CEPH_OSD_OP_OMAPGETHEADER: "
                   << " Reading " << oi.soid << " omap from tmap" << dendl;
          map<string, bufferlist> vals;
@@ -3186,7 +3186,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          goto fail;
        }
        map<string, bufferlist> out;
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          dout(20) << "CEPH_OSD_OP_OMAPGET: "
                   << " Reading " << oi.soid << " omap from tmap" << dendl;
          map<string, bufferlist> vals;
@@ -3285,7 +3285,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     case CEPH_OSD_OP_OMAPSETVALS:
       ++ctx->num_write;
       {
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          _copy_up_tmap(ctx);
        }
        if (!obs.exists) {
@@ -3315,7 +3315,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
     case CEPH_OSD_OP_OMAPSETHEADER:
       ++ctx->num_write;
       {
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          _copy_up_tmap(ctx);
        }
        if (!obs.exists) {
@@ -3335,7 +3335,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          result = -ENOENT;
          break;
        }
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          _copy_up_tmap(ctx);
        }
        t.touch(coll, soid);
@@ -3351,7 +3351,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
          result = -ENOENT;
          break;
        }
-       if (oi.uses_tmap && osd->osd->cct->_conf->osd_auto_upgrade_tmap) {
+       if (oi.uses_tmap && cct->_conf->osd_auto_upgrade_tmap) {
          _copy_up_tmap(ctx);
        }
        t.touch(coll, soid);
@@ -4487,7 +4487,7 @@ void ReplicatedPG::eval_repop(RepGather *repop)
       // _prior_ to being committed; it will not get set with
       // writeahead journaling, for instance.
       if (repop->ctx->readable_stamp == utime_t())
-       repop->ctx->readable_stamp = ceph_clock_now(osd->osd->cct);
+       repop->ctx->readable_stamp = ceph_clock_now(cct);
     }
   }
 
@@ -4602,7 +4602,7 @@ ReplicatedPG::RepGather *ReplicatedPG::new_repop(OpContext *ctx, ObjectContextRe
 
   RepGather *repop = new RepGather(ctx, obc, rep_tid, info.last_complete);
 
-  repop->start = ceph_clock_now(osd->osd->cct);
+  repop->start = ceph_clock_now(cct);
 
   repop_queue.push_back(&repop->queue_item);
   repop_map[repop->rep_tid] = repop;
@@ -4801,7 +4801,7 @@ void ReplicatedPG::handle_watch_timeout(WatchRef watch)
   osd_reqid_t reqid(osd->get_cluster_msgr_name(), 0, rep_tid);
   OpContext *ctx = new OpContext(OpRequestRef(), reqid, ops,
                                 &obc->obs, obc->ssc, this);
-  ctx->mtime = ceph_clock_now(osd->osd->cct);
+  ctx->mtime = ceph_clock_now(cct);
 
   ctx->at_version.epoch = get_osdmap()->get_epoch();
   ctx->at_version.version = pg_log.get_head().version + 1;
@@ -5360,7 +5360,7 @@ void ReplicatedPG::calc_head_subsets(ObjectContextRef obc, SnapSet& snapset, con
   if (size)
     data_subset.insert(0, size);
 
-  if (!osd->osd->cct->_conf->osd_recover_clone_overlap) {
+  if (!cct->_conf->osd_recover_clone_overlap) {
     dout(10) << "calc_head_subsets " << head << " -- osd_recover_clone_overlap disabled" << dendl;
     return;
   }
@@ -5387,7 +5387,7 @@ void ReplicatedPG::calc_head_subsets(ObjectContextRef obc, SnapSet& snapset, con
   }
 
 
-  if (cloning.num_intervals() > osd->osd->cct->_conf->osd_recover_clone_overlap_limit) {
+  if (cloning.num_intervals() > cct->_conf->osd_recover_clone_overlap_limit) {
     dout(10) << "skipping clone, too many holes" << dendl;
     clone_subsets.clear();
     cloning.clear();
@@ -5414,7 +5414,7 @@ void ReplicatedPG::calc_clone_subsets(SnapSet& snapset, const hobject_t& soid,
   if (size)
     data_subset.insert(0, size);
 
-  if (!osd->osd->cct->_conf->osd_recover_clone_overlap) {
+  if (!cct->_conf->osd_recover_clone_overlap) {
     dout(10) << "calc_clone_subsets " << soid << " -- osd_recover_clone_overlap disabled" << dendl;
     return;
   }
@@ -5463,7 +5463,7 @@ void ReplicatedPG::calc_clone_subsets(SnapSet& snapset, const hobject_t& soid,
             << " overlap " << next << dendl;
   }
 
-  if (cloning.num_intervals() > osd->osd->cct->_conf->osd_recover_clone_overlap_limit) {
+  if (cloning.num_intervals() > cct->_conf->osd_recover_clone_overlap_limit) {
     dout(10) << "skipping clone, too many holes" << dendl;
     clone_subsets.clear();
     cloning.clear();
@@ -5754,7 +5754,7 @@ int ReplicatedPG::send_pull_legacy(int prio, int peer,
   subop->set_priority(prio);
   subop->ops = vector<OSDOp>(1);
   subop->ops[0].op.op = CEPH_OSD_OP_PULL;
-  subop->ops[0].op.extent.length = osd->osd->cct->_conf->osd_recovery_max_chunk;
+  subop->ops[0].op.extent.length = cct->_conf->osd_recovery_max_chunk;
   subop->recovery_info = recovery_info;
   subop->recovery_progress = progress;
 
@@ -6111,16 +6111,16 @@ void ReplicatedPG::send_pushes(int prio, map<int, vector<PushOp> > &pushes)
        msg->set_priority(prio);
        for (;
             (j != i->second.end() &&
-             cost < osd->osd->cct->_conf->osd_max_push_cost &&
-             pushes < osd->osd->cct->_conf->osd_max_push_objects) ;
+             cost < cct->_conf->osd_max_push_cost &&
+             pushes < cct->_conf->osd_max_push_objects) ;
             ++j) {
          dout(20) << __func__ << ": sending push " << *j
                   << " to osd." << i->first << dendl;
-         cost += j->cost(osd->osd->cct);
+         cost += j->cost(cct);
          pushes += 1;
          msg->pushes.push_back(*j);
        }
-       msg->compute_cost(osd->osd->cct);
+       msg->compute_cost(cct);
        osd->send_message_osd_cluster(msg, con);
       }
     }
@@ -6157,7 +6157,7 @@ void ReplicatedPG::send_pulls(int prio, map<int, vector<PullOp> > &pulls)
       msg->pgid = info.pgid;
       msg->map_epoch = get_osdmap()->get_epoch();
       msg->pulls.swap(i->second);
-      msg->compute_cost(osd->osd->cct);
+      msg->compute_cost(cct);
       osd->send_message_osd_cluster(msg, con);
     }
   }
@@ -6212,7 +6212,7 @@ int ReplicatedPG::build_push_op(const ObjectRecoveryInfo &recovery_info,
     new_progress.first = false;
   }
 
-  uint64_t available = osd->osd->cct->_conf->osd_recovery_max_chunk;
+  uint64_t available = cct->_conf->osd_recovery_max_chunk;
   if (!progress.omap_complete) {
     ObjectMap::ObjectMapIterator iter =
       osd->store->get_omap_iterator(coll,
@@ -6766,7 +6766,7 @@ void ReplicatedPG::mark_all_unfound_lost(int what)
   ObjectStore::Transaction *t = new ObjectStore::Transaction;
   C_PG_MarkUnfoundLost *c = new C_PG_MarkUnfoundLost(this);
 
-  utime_t mtime = ceph_clock_now(osd->osd->cct);
+  utime_t mtime = ceph_clock_now(cct);
   info.last_update.epoch = get_osdmap()->get_epoch();
   const pg_missing_t &missing = pg_log.get_missing();
   map<hobject_t, pg_missing_t::item>::const_iterator m = missing.missing.begin();
@@ -7407,7 +7407,7 @@ int ReplicatedPG::recover_primary(int max, ThreadPool::TPHandle &handle)
        ++skipped;
       } else {
        int r = prepare_pull(
-         soid, need, osd->osd->cct->_conf->osd_recovery_op_priority, &pulls);
+         soid, need, cct->_conf->osd_recovery_op_priority, &pulls);
        switch (r) {
        case PULL_YES:
          ++started;
@@ -7430,7 +7430,7 @@ int ReplicatedPG::recover_primary(int max, ThreadPool::TPHandle &handle)
       pg_log.set_last_requested(v);
   }
 
-  send_pulls(osd->osd->cct->_conf->osd_recovery_op_priority, pulls);
+  send_pulls(cct->_conf->osd_recovery_op_priority, pulls);
   return started;
 }
 
@@ -7530,12 +7530,12 @@ int ReplicatedPG::recover_replicas(int max, ThreadPool::TPHandle &handle)
       dout(10) << __func__ << ": recover_object_replicas(" << soid << ")" << dendl;
       map<hobject_t,pg_missing_t::item>::const_iterator r = m.missing.find(soid);
       started += prep_object_replica_pushes(soid, r->second.need,
-                                           osd->osd->cct->_conf->osd_recovery_op_priority,
+                                           cct->_conf->osd_recovery_op_priority,
                                            &pushes);
     }
   }
 
-  send_pushes(osd->osd->cct->_conf->osd_recovery_op_priority, pushes);
+  send_pushes(cct->_conf->osd_recovery_op_priority, pushes);
 
   return started;
 }
@@ -7707,7 +7707,7 @@ int ReplicatedPG::recover_backfill(
     prep_backfill_object_push(
       i->first, i->second.first, i->second.second, backfill_target, &pushes);
   }
-  send_pushes(osd->osd->cct->_conf->osd_recovery_op_priority, pushes);
+  send_pushes(cct->_conf->osd_recovery_op_priority, pushes);
 
   release_waiting_for_backfill_pos();
   dout(5) << "backfill_pos is " << backfill_pos << " and pinfo.last_backfill is "
@@ -7764,7 +7764,7 @@ void ReplicatedPG::prep_backfill_object_push(
   ObjectContextRef obc = get_object_context(oid, false);
   obc->ondisk_read_lock();
   (*pushes)[peer].push_back(PushOp());
-  prep_push_to_replica(obc, oid, peer, osd->osd->cct->_conf->osd_recovery_op_priority,
+  prep_push_to_replica(obc, oid, peer, cct->_conf->osd_recovery_op_priority,
                       &((*pushes)[peer].back()));
   obc->ondisk_read_unlock();
 }
@@ -7816,7 +7816,7 @@ void ReplicatedPG::check_local()
 
   assert(info.last_update >= pg_log.get_tail());  // otherwise we need some help!
 
-  if (!osd->osd->cct->_conf->osd_debug_verify_stray_on_activate)
+  if (!cct->_conf->osd_debug_verify_stray_on_activate)
     return;
 
   // just scan the log.