OSD: rename gen_wq, schedule_work, and PG_QueueAsync to include "recovery"
author Greg Farnum <greg@inktank.com>
Mon, 24 Mar 2014 21:35:02 +0000 (14:35 -0700)
committer Greg Farnum <greg@inktank.com>
Mon, 5 May 2014 22:29:15 +0000 (15:29 -0700)
These all hook into the recovery thread pool, and their names need to make that obvious.

Signed-off-by: Greg Farnum <greg@inktank.com>
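
For context, a minimal sketch of the call chain the new names describe. This is not the real Ceph code: TPHandle, GenCtx, GenCtxWQ, and the drain loop below are simplified stand-ins for ThreadPool::TPHandle, GenContext<ThreadPool::TPHandle&>, GenContextWQ, and the recovery_tp worker loop, with all locking and handle plumbing omitted. The point is the shape: a backend hands a context to its Listener's schedule_recovery_work(), ReplicatedPG queues it on OSDService::recovery_gen_wq, and that queue is drained by workers of osd->recovery_tp.

    // ---- illustrative sketch only; not part of this commit ----
    #include <iostream>
    #include <queue>

    struct TPHandle {};                  // stand-in for ThreadPool::TPHandle

    // Stand-in for GenContext<ThreadPool::TPHandle&>: a one-shot callback.
    struct GenCtx {
      virtual void finish(TPHandle &h) = 0;
      virtual ~GenCtx() {}
      void complete(TPHandle &h) { finish(h); delete this; }
    };

    // Stand-in for GenContextWQ: a queue drained by recovery_tp workers.
    // After this commit the OSDService member is spelled recovery_gen_wq,
    // so call sites show which pool the context will run on.
    struct GenCtxWQ {
      std::queue<GenCtx*> q;
      void queue(GenCtx *c) { q.push(c); }
      void drain(TPHandle &h) {          // what a recovery_tp worker does
        while (!q.empty()) { GenCtx *c = q.front(); q.pop(); c->complete(h); }
      }
    };

    struct OSDServiceSketch { GenCtxWQ recovery_gen_wq; };

    // What ReplicatedPG::schedule_recovery_work() reduces to after the rename.
    void schedule_recovery_work(OSDServiceSketch *osd, GenCtx *c) {
      osd->recovery_gen_wq.queue(c);
    }

    struct Announce : GenCtx {
      void finish(TPHandle &) override { std::cout << "ran on recovery tp\n"; }
    };

    int main() {
      OSDServiceSketch osd;
      schedule_recovery_work(&osd, new Announce);
      TPHandle h;
      osd.recovery_gen_wq.drain(h);      // recovery thread pool loop
    }

Seen this way, the old names (gen_wq, schedule_work, PG_QueueAsync) said nothing about which thread pool would run the context; the new names tie every step of the chain to recovery_tp.
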
src/osd/ECBackend.cc
src/osd/OSD.cc
src/osd/OSD.h
src/osd/PGBackend.h
src/osd/ReplicatedBackend.cc
src/osd/ReplicatedPG.cc
src/osd/ReplicatedPG.h

index 3c27288c53259e758fcb733cdf717839960bca4e..3aae53bd6bc983cd6db0e64a6a9be374336114c9 100644 (file)
@@ -1091,7 +1091,7 @@ void ECBackend::filter_read_op(
   }
 
   if (op.in_progress.empty()) {
-    get_parent()->schedule_work(
+    get_parent()->schedule_recovery_work(
       get_parent()->bless_gencontext(
        new FinishReadOp(this, op.tid)));
   }
index 0ed88cb33c3b29c8b9ef7e48bb763f8d32addb1f..1ab4fd26b630333ad20d233464fd9ada42e13856 100644 (file)
@@ -188,7 +188,7 @@ OSDService::OSDService(OSD *osd) :
   scrub_wq(osd->scrub_wq),
   scrub_finalize_wq(osd->scrub_finalize_wq),
   rep_scrub_wq(osd->rep_scrub_wq),
-  gen_wq("gen_wq", cct->_conf->osd_recovery_thread_timeout, &osd->recovery_tp),
+  recovery_gen_wq("gen_wq", cct->_conf->osd_recovery_thread_timeout, &osd->recovery_tp),
   class_handler(osd->class_handler),
   publish_lock("OSDService::publish_lock"),
   pre_publish_lock("OSDService::pre_publish_lock"),
index 7b87d2ea262c83942d64c4708d0021ddbe7f7c67..4f45f016ba13f2ba7cf6d2337fc520766c11e204 100644 (file)
@@ -328,7 +328,7 @@ public:
   ThreadPool::WorkQueue<PG> &scrub_wq;
   ThreadPool::WorkQueue<PG> &scrub_finalize_wq;
   ThreadPool::WorkQueue<MOSDRepScrub> &rep_scrub_wq;
-  GenContextWQ gen_wq;
+  GenContextWQ recovery_gen_wq;
   ClassHandler  *&class_handler;
 
   void dequeue_pg(PG *pg, list<OpRequestRef> *dequeued);
index e1e0423cc2e9fb00426995a0a04c934bb38cbb83..dd372612bebbb3e0bb211798aca642698cec1ee0 100644 (file)
      virtual void update_stats(
        const pg_stat_t &stat) = 0;
 
-     virtual void schedule_work(
+     virtual void schedule_recovery_work(
        GenContext<ThreadPool::TPHandle&> *c) = 0;
 
      virtual pg_shard_t whoami_shard() const = 0;
@@ -624,14 +624,14 @@ struct PG_SendMessageOnConn: public Context {
   }
 };
 
-struct PG_QueueAsync : public Context {
+struct PG_RecoveryQueueAsync : public Context {
   PGBackend::Listener *pg;
   GenContext<ThreadPool::TPHandle&> *c;
-  PG_QueueAsync(
+  PG_RecoveryQueueAsync(
     PGBackend::Listener *pg,
     GenContext<ThreadPool::TPHandle&> *c) : pg(pg), c(c) {}
   void finish(int) {
-    pg->schedule_work(c);
+    pg->schedule_recovery_work(c);
   }
 };
 
index 5a9668fc2a161c52e9b546099a37eb0a220be162..0763c7f740a59e2eba525274a30ddf1e2e915ae4 100644 (file)
@@ -254,14 +254,14 @@ void ReplicatedBackend::objects_read_async(
     int _r = store->read(coll, hoid, i->first.first,
                         i->first.second, *(i->second.first));
     if (i->second.second) {
-      get_parent()->schedule_work(
+      get_parent()->schedule_recovery_work(
        get_parent()->bless_gencontext(
          new AsyncReadCallback(_r, i->second.second)));
     }
     if (_r < 0)
       r = _r;
   }
-  get_parent()->schedule_work(
+  get_parent()->schedule_recovery_work(
     get_parent()->bless_gencontext(
       new AsyncReadCallback(r, on_complete)));
 }
index 68b14040f7ee095b10a6820b2723a4228c94fcd3..4d3415fc916f0c4f3edd87a6178399481415df3c 100644 (file)
@@ -333,10 +333,10 @@ void ReplicatedPG::begin_peer_recover(
   peer_missing[peer].revise_have(soid, eversion_t());
 }
 
-void ReplicatedPG::schedule_work(
+void ReplicatedPG::schedule_recovery_work(
   GenContext<ThreadPool::TPHandle&> *c)
 {
-  osd->gen_wq.queue(c);
+  osd->recovery_gen_wq.queue(c);
 }
 
 void ReplicatedPG::send_message_osd_cluster(
@@ -2158,7 +2158,7 @@ void ReplicatedBackend::_do_pull_response(OpRequestRef op)
        m->get_priority());
     c->to_continue.swap(to_continue);
     t->register_on_complete(
-      new PG_QueueAsync(
+      new PG_RecoveryQueueAsync(
        get_parent(),
        get_parent()->bless_gencontext(c)));
   }
@@ -8938,7 +8938,7 @@ void ReplicatedBackend::sub_op_push(OpRequestRef op)
          op->get_req()->get_priority());
       c->to_continue.swap(to_continue);
       t->register_on_complete(
-       new PG_QueueAsync(
+       new PG_RecoveryQueueAsync(
          get_parent(),
          get_parent()->bless_gencontext(c)));
     }
index 3ea47218aadaed8ddbff1d831eb4c6c182b8835d..61fd55763ec5d3af968dd8e774725da070db44d9 100644 (file)
@@ -388,7 +388,7 @@ public:
     info.stats = stat;
   }
 
-  void schedule_work(
+  void schedule_recovery_work(
     GenContext<ThreadPool::TPHandle&> *c);
 
   pg_shard_t whoami_shard() const {