rgw: add rgw_bucket_sync_status() to bypass manager
author    Casey Bodley <cbodley@redhat.com>
          Tue, 12 Sep 2017 16:48:56 +0000 (12:48 -0400)
committer Casey Bodley <cbodley@redhat.com>
          Fri, 10 Nov 2017 18:23:01 +0000 (13:23 -0500)
RGWBucketSyncStatusManager::init() does a lot of extra work that isn't
needed to serve the REST API (spawning an HTTP manager thread, fetching
the bucket instance info, etc.). The new rgw_bucket_sync_status() entry
point bypasses the manager and reads the per-shard sync status directly.

It uses RGWShardCollectCR to limit the number of concurrent status reads
to 16 (a standalone sketch of that bounded fan-out pattern follows the
diff below).

Signed-off-by: Casey Bodley <cbodley@redhat.com>
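
As a usage sketch (not part of this commit; the caller name is
hypothetical), a consumer such as the sync-status REST handler could
invoke the new entry point directly, with no RGWBucketSyncStatusManager
involved:

  // hypothetical caller, assuming the relevant rgw headers are included
  int handle_bucket_sync_status(RGWRados *store,
                                const std::string& source_zone,
                                const rgw_bucket& bucket)
  {
    std::vector<rgw_bucket_shard_sync_info> status;
    int r = rgw_bucket_sync_status(store, source_zone, bucket, &status);
    if (r < 0) {
      return r; // bucket instance lookup or shard status reads failed
    }
    // one entry per shard (at least one, even for unsharded buckets);
    // each entry carries that shard's sync state and markers
    for (const auto& shard_status : status) {
      (void)shard_status; // render into the REST response here
    }
    return 0;
  }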
src/rgw/rgw_data_sync.cc
src/rgw/rgw_data_sync.h

diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc
index af6cfd2ff10a5b88d43acbc8acbafb6bae0bb249..2a29a18fb0d87e3b3a68734910cf624ee478c8ae 100644
@@ -3106,6 +3106,61 @@ string RGWBucketSyncStatusManager::status_oid(const string& source_zone,
   return bucket_status_oid_prefix + "." + source_zone + ":" + bs.get_key();
 }
 
+class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
+  static constexpr int max_concurrent_shards = 16;
+  RGWRados *const store;
+  RGWDataSyncEnv *const env;
+  const int num_shards;
+  rgw_bucket_shard bs;
+
+  using Vector = std::vector<rgw_bucket_shard_sync_info>;
+  Vector::iterator i, end;
+
+ public:
+  RGWCollectBucketSyncStatusCR(RGWRados *store, RGWDataSyncEnv *env,
+                               int num_shards, const rgw_bucket& bucket,
+                               Vector *status)
+    : RGWShardCollectCR(store->ctx(), max_concurrent_shards),
+      store(store), env(env), num_shards(num_shards),
+      bs(bucket, num_shards > 0 ? 0 : -1), // start at shard 0 or -1
+      i(status->begin()), end(status->end())
+  {}
+
+  bool spawn_next() override {
+    if (i == end) {
+      return false;
+    }
+    spawn(new RGWReadBucketSyncStatusCoroutine(env, bs, &*i), false);
+    ++i;
+    ++bs.shard_id;
+    return true;
+  }
+};
+
+int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+                           const rgw_bucket& bucket,
+                           std::vector<rgw_bucket_shard_sync_info> *status)
+{
+  // read the bucket instance info for num_shards
+  RGWObjectCtx ctx(store);
+  RGWBucketInfo info;
+  int ret = store->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr);
+  if (ret < 0) {
+    return ret;
+  }
+  status->clear();
+  status->resize(std::max<size_t>(1, info.num_shards));
+
+  RGWDataSyncEnv env;
+  RGWSyncModuleInstanceRef module; // null sync module
+  env.init(store->ctx(), store, nullptr, store->get_async_rados(),
+           nullptr, nullptr, nullptr, source_zone, module);
+
+  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  return crs.run(new RGWCollectBucketSyncStatusCR(store, &env, info.num_shards,
+                                                  bucket, status));
+}
+
 
 // TODO: move into rgw_data_sync_trim.cc
 #undef dout_prefix
diff --git a/src/rgw/rgw_data_sync.h b/src/rgw/rgw_data_sync.h
index 2a59fdac328b9c570d1de4c7ccfcc648b91c638c..f4d3c7e7138aeac3a3f505a43aae0a655ed6102a 100644
@@ -530,6 +530,11 @@ public:
   int run();
 };
 
+/// read the sync status of all bucket shards from the given source zone
+int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+                           const rgw_bucket& bucket,
+                           std::vector<rgw_bucket_shard_sync_info> *status);
+
 class RGWDefaultSyncModule : public RGWSyncModule {
 public:
   RGWDefaultSyncModule() {}
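
For illustration, a minimal standalone sketch of the bounded fan-out
idea behind RGWShardCollectCR, with plain threads standing in for
coroutines (hypothetical names; a batched approximation of the windowed
spawning that the collector does by calling spawn_next() until it
returns false, never exceeding max_concurrent_shards children):

  #include <algorithm>
  #include <functional>
  #include <thread>
  #include <vector>

  // run one read per shard, but never more than max_concurrent at once
  void collect_shards(int num_shards, int max_concurrent,
                      const std::function<void(int)>& read_shard)
  {
    for (int base = 0; base < num_shards; base += max_concurrent) {
      const int end = std::min(base + max_concurrent, num_shards);
      std::vector<std::thread> window;
      for (int shard = base; shard < end; ++shard) {
        window.emplace_back(read_shard, shard); // analogue of spawn_next()
      }
      for (auto& t : window) {
        t.join(); // drain this window before spawning the next batch
      }
    }
  }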