rgw: add reshard status to bucket stats plus minor misc items (58567/head)
author J. Eric Ivancich <ivancich@redhat.com>
Fri, 12 Jul 2024 18:39:18 +0000 (14:39 -0400)
committer J. Eric Ivancich <ivancich@redhat.com>
Fri, 12 Jul 2024 19:13:20 +0000 (15:13 -0400)
Adds reshard status to `radosgw-admin bucket stats ...`. Renames the
variable `max_entries` to `max_op_entries` to clarify that it is not the
maximum number of entries wanted in a bucket index shard. Does some
other minor clean-ups.

Signed-off-by: J. Eric Ivancich <ivancich@redhat.com>
doc/man/8/radosgw-admin.rst
src/rgw/driver/rados/rgw_bucket.cc
src/rgw/driver/rados/rgw_reshard.cc

index 468ef1ebce6f1ccc32184919b64c58fe4773e693..7dfa59a61b1a9f65f3944a0ff65b788fcaa9380b 100644 (file)
@@ -96,7 +96,7 @@ as follows:
   Invoke with --marker to resume if the command is interrupted.
 
 :command:`bucket stats`
-  Returns bucket statistics.
+  List bucket statistics plus other internal information about a bucket.
 
 :command:`bucket rm`
   Remove a bucket.
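
For context, `bucket stats` is invoked per bucket and, with this change, its JSON output gains a `reshard_status` field alongside the existing index fields. A trimmed, illustrative excerpt (the values here are made up; the actual status strings come from to_string() in the RGW sources):

    $ radosgw-admin bucket stats --bucket=mybucket
    {
        ...
        "num_shards": 11,
        "reshard_status": "not-resharding",
        ...
    }
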
index 14219dad4a8fc9fd78c0a884049160f2c19d4449..996de5f3c4d85ddecc4de1415a4ceea2657a964b 100644 (file)
@@ -1455,7 +1455,7 @@ static int bucket_stats(rgw::sal::Driver* driver,
 
   const RGWBucketInfo& bucket_info = bucket->get_info();
 
-  const auto& index = bucket->get_info().get_current_index();
+  const auto& index = bucket_info.get_current_index();
   if (is_layout_indexless(index)) {
     cerr << "error, indexless buckets do not maintain stats; bucket=" <<
       bucket->get_name() << std::endl;
@@ -1480,25 +1480,26 @@ static int bucket_stats(rgw::sal::Driver* driver,
                         bucket->versioned()
                         ? (bucket->versioning_enabled() ? "enabled" : "suspended")
                         : "off");
-  formatter->dump_string("zonegroup", bucket->get_info().zonegroup);
-  formatter->dump_string("placement_rule", bucket->get_info().placement_rule.to_str());
+  formatter->dump_string("zonegroup", bucket_info.zonegroup);
+  formatter->dump_string("placement_rule", bucket_info.placement_rule.to_str());
   ::encode_json("explicit_placement", bucket->get_key().explicit_placement, formatter);
   formatter->dump_string("id", bucket->get_bucket_id());
   formatter->dump_string("marker", bucket->get_marker());
-  formatter->dump_stream("index_type") << bucket->get_info().layout.current_index.layout.type;
-  formatter->dump_int("index_generation", bucket->get_info().layout.current_index.gen);
+  formatter->dump_stream("index_type") << bucket_info.layout.current_index.layout.type;
+  formatter->dump_int("index_generation", bucket_info.layout.current_index.gen);
   formatter->dump_int("num_shards",
-                     bucket->get_info().layout.current_index.layout.normal.num_shards);
+                     bucket_info.layout.current_index.layout.normal.num_shards);
+  formatter->dump_string("reshard_status", to_string(bucket_info.reshard_status));
   formatter->dump_bool("object_lock_enabled", bucket_info.obj_lock_enabled());
   formatter->dump_bool("mfa_enabled", bucket_info.mfa_enabled());
-  ::encode_json("owner", bucket->get_info().owner, formatter);
+  ::encode_json("owner", bucket_info.owner, formatter);
   formatter->dump_string("ver", bucket_ver);
   formatter->dump_string("master_ver", master_ver);
   ut.gmtime(formatter->dump_stream("mtime"));
   ctime_ut.gmtime(formatter->dump_stream("creation_time"));
   formatter->dump_string("max_marker", max_marker);
   dump_bucket_usage(stats, formatter);
-  encode_json("bucket_quota", bucket->get_info().quota, formatter);
+  encode_json("bucket_quota", bucket_info.quota, formatter);
 
   // bucket tags
   auto iter = bucket->get_attrs().find(RGW_ATTR_TAGS);
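
The new dump line depends on a to_string() overload for the bucket's reshard status. A minimal sketch of the kind of mapping involved, assuming the enum is cls_rgw_reshard_status with values NOT_RESHARDING, IN_PROGRESS, and DONE; the authoritative definition and the exact strings printed by radosgw-admin live in the RGW cls headers:

    #include <cstdint>
    #include <string>

    // Sketch only; mirrors the shape of the real enum in the RGW cls headers.
    enum class cls_rgw_reshard_status : uint8_t {
      NOT_RESHARDING = 0,
      IN_PROGRESS    = 1,
      DONE           = 2,
    };

    // Hypothetical mapping; the strings shown by `bucket stats` come from the
    // real to_string() in the Ceph sources, not from this sketch.
    inline std::string to_string(cls_rgw_reshard_status s) {
      switch (s) {
      case cls_rgw_reshard_status::NOT_RESHARDING: return "not-resharding";
      case cls_rgw_reshard_status::IN_PROGRESS:    return "in-progress";
      case cls_rgw_reshard_status::DONE:           return "done";
      }
      return "unknown";
    }
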
index 9bd4aad89a1b5e6dc94cca5979fd44e597d2c947..725c37d89a9da0af38cbda1193a6fd8353a3309c 100644 (file)
@@ -415,7 +415,7 @@ static int init_target_layout(rgw::sal::RadosStore* store,
                               RGWBucketInfo& bucket_info,
                              std::map<std::string, bufferlist>& bucket_attrs,
                               ReshardFaultInjector& fault,
-                              uint32_t new_num_shards,
+                              const uint32_t new_num_shards,
                               const DoutPrefixProvider* dpp, optional_yield y)
 {
   auto prev = bucket_info.layout; // make a copy for cleanup
@@ -591,7 +591,7 @@ static int init_reshard(rgw::sal::RadosStore* store,
                         RGWBucketInfo& bucket_info,
                        std::map<std::string, bufferlist>& bucket_attrs,
                         ReshardFaultInjector& fault,
-                        uint32_t new_num_shards,
+                        const uint32_t new_num_shards,
                         const DoutPrefixProvider *dpp, optional_yield y)
 {
   if (new_num_shards == 0) {
@@ -617,6 +617,7 @@ static int init_reshard(rgw::sal::RadosStore* store,
     revert_target_layout(store, bucket_info, bucket_attrs, fault, dpp, y);
     return ret;
   }
+
   return 0;
 } // init_reshard
 
@@ -884,7 +885,7 @@ int RGWBucketReshardLock::renew(const Clock::time_point& now) {
 
 int RGWBucketReshard::do_reshard(const rgw::bucket_index_layout_generation& current,
                                  const rgw::bucket_index_layout_generation& target,
-                                 int max_entries,
+                                 int max_op_entries, // max num to process per op
                                 bool verbose,
                                 ostream *out,
                                 Formatter *formatter,
@@ -898,9 +899,9 @@ int RGWBucketReshard::do_reshard(const rgw::bucket_index_layout_generation& curr
   /* update bucket info -- in progress*/
   list<rgw_cls_bi_entry> entries;
 
-  if (max_entries < 0) {
+  if (max_op_entries <= 0) {
     ldpp_dout(dpp, 0) << __func__ <<
-      ": can't reshard, negative max_entries" << dendl;
+      ": can't reshard, non-positive max_op_entries" << dendl;
     return -EINVAL;
   }
 
@@ -926,7 +927,9 @@ int RGWBucketReshard::do_reshard(const rgw::bucket_index_layout_generation& curr
     const std::string null_object_filter; // empty string since we're not filtering by object
     while (is_truncated) {
       entries.clear();
-      int ret = store->getRados()->bi_list(dpp, bucket_info, i, null_object_filter, marker, max_entries, &entries, &is_truncated, y);
+      int ret = store->getRados()->bi_list(
+       dpp, bucket_info, i, null_object_filter, marker, max_op_entries,
+       &entries, &is_truncated, y);
       if (ret == -ENOENT) {
         ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to find shard "
             << i << ", skipping" << dendl;
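
The renamed parameter caps how many entries a single bi_list() call returns; the surrounding loop keeps issuing calls with an advancing marker until the listing is no longer truncated, so the cap is per op, not per shard. A self-contained sketch of that marker-based pagination pattern, using a hypothetical in-memory list_entries() stand-in rather than the real bi_list() signature:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for a paginated listing op such as bi_list():
    // returns at most max_op_entries keys sorting after 'marker' and reports
    // whether more remain.
    static int list_entries(const std::vector<std::string>& all,
                            const std::string& marker, int max_op_entries,
                            std::vector<std::string>* out, bool* is_truncated) {
      out->clear();
      for (const auto& k : all) {
        if (k <= marker) continue;                  // already consumed
        if ((int)out->size() == max_op_entries) {   // cap applies per call only
          *is_truncated = true;
          return 0;
        }
        out->push_back(k);
      }
      *is_truncated = false;
      return 0;
    }

    int main() {
      const std::vector<std::string> index = {"a", "b", "c", "d", "e"};
      const int max_op_entries = 2;   // per-call cap, not a per-shard total
      std::string marker;             // empty marker == start at the beginning
      bool is_truncated = true;
      while (is_truncated) {
        std::vector<std::string> entries;
        list_entries(index, marker, max_op_entries, &entries, &is_truncated);
        for (const auto& k : entries) {
          std::cout << "processing " << k << "\n";
          marker = k;                 // resume point for the next call
        }
      }
      return 0;
    }
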
@@ -1032,7 +1035,7 @@ int RGWBucketReshard::get_status(const DoutPrefixProvider *dpp, list<cls_rgw_buc
 
 int RGWBucketReshard::execute(int num_shards,
                               ReshardFaultInjector& fault,
-                              int max_op_entries,
+                              int max_op_entries,  // max num to process per op
                              const cls_rgw_reshard_initiator initiator,
                               const DoutPrefixProvider *dpp,
                              optional_yield y,
@@ -1309,7 +1312,7 @@ void RGWReshardWait::stop()
 }
 
 int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
-                              int max_entries,
+                              int max_op_entries, // max num to process per op
                              const DoutPrefixProvider* dpp,
                              optional_yield y)
 {
@@ -1462,7 +1465,7 @@ int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
   RGWBucketReshard br(store, bucket_info, bucket_attrs, nullptr);
 
   ReshardFaultInjector f; // no fault injected
-  ret = br.execute(entry.new_num_shards, f, max_entries, entry.initiator,
+  ret = br.execute(entry.new_num_shards, f, max_op_entries, entry.initiator,
                   dpp, y, false, nullptr, nullptr, this);
   if (ret < 0) {
     ldpp_dout(dpp, 0) <<  __func__ <<
@@ -1481,10 +1484,14 @@ int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
 
 int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp, optional_yield y)
 {
-  string marker;
-  bool truncated = true;
+  std::string marker;
+  bool is_truncated = true;
 
-  constexpr uint32_t max_entries = 1000;
+  // This is the number to request per op, whether it's reshard queue
+  // entries or bucket index entries. Should not be confused with the
+  // number of entries we allow in a bucket index shard. This value is
+  // passed in and used deeper into the call chain as well.
+  constexpr uint32_t max_op_entries = 1000;
 
   string logshard_oid;
   get_logshard_oid(logshard_num, &logshard_oid);
@@ -1500,15 +1507,15 @@ int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvid
   
   do {
     std::list<cls_rgw_reshard_entry> entries;
-    ret = list(dpp, logshard_num, marker, max_entries, entries, &truncated);
+    ret = list(dpp, logshard_num, marker, max_op_entries, entries, &is_truncated);
     if (ret < 0) {
       ldpp_dout(dpp, 10) << "cannot list all reshards in logshard oid=" <<
        logshard_oid << dendl;
       continue;
     }
 
-    for(auto& entry : entries) { // logshard entries
-      process_entry(entry, max_entries, dpp, y);
+    for(const auto& entry : entries) { // logshard entries
+      process_entry(entry, max_op_entries, dpp, y);
 
       Clock::time_point now = Clock::now();
       if (logshard_lock.should_renew(now)) {
@@ -1520,7 +1527,7 @@ int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvid
 
       entry.get_key(&marker);
     } // entry for loop
-  } while (truncated);
+  } while (is_truncated);
 
   logshard_lock.unlock();
   return 0;
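
While a logshard is being processed, the loop above periodically checks should_renew() and re-acquires the timed lock so it does not expire mid-run. A small sketch of that renew-while-working pattern with a hypothetical TimedLock class (the real RGWBucketReshardLock renews a RADOS lock, which is not modeled here):

    #include <chrono>
    #include <iostream>
    #include <thread>

    using Clock = std::chrono::steady_clock;

    // Hypothetical timed lock; only the renewal bookkeeping is modeled.
    class TimedLock {
      Clock::time_point expires_;
      std::chrono::milliseconds duration_{500};
     public:
      void lock() { expires_ = Clock::now() + duration_; }
      // ask to renew once less than half of the duration remains
      bool should_renew(Clock::time_point now) const {
        return now > expires_ - duration_ / 2;
      }
      void renew(Clock::time_point now) { expires_ = now + duration_; }
    };

    int main() {
      TimedLock lock;
      lock.lock();
      for (int i = 0; i < 10; ++i) {
        std::this_thread::sleep_for(std::chrono::milliseconds(100)); // simulate work
        const auto now = Clock::now();
        if (lock.should_renew(now)) {
          lock.renew(now);            // keep the lock alive mid-loop
          std::cout << "renewed lock at iteration " << i << "\n";
        }
      }
      return 0;
    }
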
@@ -1592,8 +1599,9 @@ void *RGWReshard::ReshardWorker::entry() {
     utime_t start = ceph_clock_now();
     reshard->process_all_logshards(this, null_yield);
 
-    if (reshard->going_down())
+    if (reshard->going_down()) {
       break;
+    }
 
     utime_t end = ceph_clock_now();
     utime_t elapsed = end - start;