git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
common/AsyncReserver: template out the finisher type
author Samuel Just <sjust@redhat.com>
Mon, 12 Aug 2019 23:36:44 +0000 (16:36 -0700)
committer Xuehan Xu <xxhdx1985126@163.com>
Sun, 26 Apr 2020 07:46:35 +0000 (15:46 +0800)
Signed-off-by: Samuel Just <sjust@redhat.com>
src/common/AsyncReserver.h
src/osd/OSD.h

index bbc8b72a1f7d8e97787cc19a27d6c8ac0839c82e..b80f9e7df8f60810888f801d9d76454d1ec4805d 100644 (file)
@@ -15,7 +15,6 @@
 #ifndef ASYNC_RESERVER_H
 #define ASYNC_RESERVER_H
 
-#include "common/Finisher.h"
 #include "common/Formatter.h"
 
 #define rdout(x) lgeneric_subdout(cct,reserver,x)
  * linear with respect to the total number of priorities used
  * over all time.
  */
-template <typename T>
+template <typename T, typename F>
 class AsyncReserver {
   CephContext *cct;
-  Finisher *f;
+  F *f;
   unsigned max_allowed;
   unsigned min_priority;
   ceph::mutex lock = ceph::make_mutex("AsyncReserver::lock");
@@ -122,7 +121,7 @@ class AsyncReserver {
 public:
   AsyncReserver(
     CephContext *cct,
-    Finisher *f,
+    F *f,
     unsigned max_allowed,
     unsigned min_priority = 0)
     : cct(cct),
index 4eab442a9eb3a3f8734d5c1d4cc9290f948130fc..44dcae442e45cda1dd57cd76788b4e080180599d 100644 (file)
@@ -59,6 +59,7 @@
 #include "messages/MOSDOp.h"
 #include "common/EventTrace.h"
 #include "osd/osd_perf_counters.h"
+#include "common/Finisher.h"
 
 #define CEPH_OSD_PROTOCOL    10 /* cluster internal */
 
@@ -556,8 +557,8 @@ public:
 
   // -- backfill_reservation --
   Finisher reserver_finisher;
-  AsyncReserver<spg_t> local_reserver;
-  AsyncReserver<spg_t> remote_reserver;
+  AsyncReserver<spg_t, Finisher> local_reserver;
+  AsyncReserver<spg_t, Finisher> remote_reserver;
 
   // -- pg merge --
   ceph::mutex merge_lock = ceph::make_mutex("OSD::merge_lock");
@@ -605,7 +606,7 @@ public:
   void prune_pg_created();
   void send_pg_created();
 
-  AsyncReserver<spg_t> snap_reserver;
+  AsyncReserver<spg_t, Finisher> snap_reserver;
   void queue_recovery_context(PG *pg, GenContext<ThreadPool::TPHandle&> *c);
   void queue_for_snap_trim(PG *pg);
   void queue_for_scrub(PG *pg, bool with_high_priority);