rgw: add checks for non-empty layout.logs
author     Casey Bodley <cbodley@redhat.com>
           Tue, 18 Jan 2022 21:48:25 +0000 (16:48 -0500)
committer  Casey Bodley <cbodley@redhat.com>
           Tue, 8 Feb 2022 21:32:24 +0000 (16:32 -0500)
always verify that logs is not empty before calling logs.back() or
logs.front()

Signed-off-by: Casey Bodley <cbodley@redhat.com>
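
In outline, every unguarded call to logs.front() or logs.back() below gains an emptiness check first, either returning early or substituting a default. A minimal sketch of the two variants, assuming a simplified stand-in for the log layout entry (the names here are illustrative, not actual rgw symbols):

    #include <cerrno>
    #include <cstdint>
    #include <vector>

    struct log_entry { uint64_t gen = 0; };  // stand-in for rgw's per-generation log layout

    // variant 1: bail out when there is nothing to operate on
    // (calling front()/back() on an empty vector is undefined behavior)
    int latest_log_gen(const std::vector<log_entry>& logs, uint64_t* gen)
    {
      if (logs.empty()) {
        return -ENOENT;          // caller decides whether this is fatal
      }
      *gen = logs.back().gen;    // safe: emptiness was checked above
      return 0;
    }

    // variant 2: fall back to a default, as commit_reshard() does below
    uint64_t next_log_gen(const std::vector<log_entry>& logs)
    {
      return logs.empty() ? 1 : logs.back().gen + 1;
    }

The per-file changes apply the same idea: rgw_admin.cc, rgw_rest_log.cc, rgw_trim_bilog.cc and svc_bi_rados.cc return early when layout.logs is empty; rgw_reshard.cc falls back to generation 1; rgw_data_sync.cc folds the check into its backward-compatibility test; and rgw_rados.cc moves the check into a new add_datalog_entry() helper so every datalog write goes through it.
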
src/rgw/rgw_admin.cc
src/rgw/rgw_data_sync.cc
src/rgw/rgw_rados.cc
src/rgw/rgw_reshard.cc
src/rgw/rgw_rest_log.cc
src/rgw/rgw_trim_bilog.cc
src/rgw/services/svc_bi_rados.cc

diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 7318519d4cdb6e03b2b744ac66aa2c23f2222caa..c0a505c26c59e5ba2656cad6864a4ae3d074264d 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -2541,7 +2541,12 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
     shard_status.resize(full_status.shards_done_with_gen.size());
   } else if (r == -ENOENT) {
     // no full status, but there may be per-shard status from before upgrade
-    const auto& log = source_bucket->get_info().layout.logs.front();
+    const auto& logs = source_bucket->get_info().layout.logs;
+    if (logs.empty()) {
+      out << indented{width} << "init: bucket sync has not started\n";
+      return 0;
+    }
+    const auto& log = logs.front();
     if (log.gen > 0) {
       // this isn't the backward-compatible case, so we just haven't started yet
       out << indented{width} << "init: bucket sync has not started\n";
@@ -2552,7 +2557,7 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
       return -EINVAL;
     }
     // use shard count from our log gen=0
-    shard_status.resize(log.layout.in_index.layout.num_shards);
+    shard_status.resize(rgw::num_shards(log.layout.in_index));
   } else {
     lderr(store->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
     return r;
diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc
index ba508b0783f1d51001c0052106ef02f261486589..02d23dc86a68ffa2289c52ff83a4f0ddc2e30491 100644
--- a/src/rgw/rgw_data_sync.cc
+++ b/src/rgw/rgw_data_sync.cc
@@ -3159,12 +3159,12 @@ public:
        if (check_compat) {
          // use shard count from our log gen=0
          // try to convert existing per-shard incremental status for backward compatibility
-         if (source_info.layout.logs.front().gen > 0) {
+         if (source_info.layout.logs.empty() ||
+             source_info.layout.logs.front().gen > 0) {
            ldpp_dout(dpp, 20) << "no generation zero when checking compatibility" << dendl;
            no_zero = true;
-         }
-         if (auto& log = source_info.layout.logs.front();
-             log.layout.type != rgw::BucketLogType::InIndex) {
+         } else if (auto& log = source_info.layout.logs.front();
+                     log.layout.type != rgw::BucketLogType::InIndex) {
            ldpp_dout(dpp, 20) << "unrecognized log layout type when checking compatibility " << log.layout.type << dendl;
            no_zero = true;
          }
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 2d589cdf067a958b6cf1c7956aa3784536ab177f..f829db023b092c71ecd8d124b1f9f578bcda4c82 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -791,6 +791,21 @@ int RGWRados::get_max_chunk_size(const rgw_placement_rule& placement_rule, const
   return get_max_chunk_size(pool, max_chunk_size, dpp, palignment);
 }
 
+void add_datalog_entry(const DoutPrefixProvider* dpp,
+                       RGWDataChangesLog* datalog,
+                       const RGWBucketInfo& bucket_info,
+                       uint32_t shard_id)
+{
+  const auto& logs = bucket_info.layout.logs;
+  if (logs.empty()) {
+    return;
+  }
+  int r = datalog->add_entry(dpp, bucket_info, logs.back(), shard_id);
+  if (r < 0) {
+    ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
+  } // datalog error is not fatal
+}
+
 class RGWIndexCompletionManager;
 
 struct complete_op_data {
@@ -888,12 +903,8 @@ int RGWIndexCompletionThread::process(const DoutPrefixProvider *dpp)
       /* ignoring error, can't do anything about it */
       continue;
     }
-    r = store->svc.datalog_rados->add_entry(this, bucket_info,
-                                           bucket_info.layout.logs.back(),
-                                           bs.shard_id);
-    if (r < 0) {
-      ldpp_dout(this, -1) << "ERROR: failed writing data log" << dendl;
-    }
+
+    add_datalog_entry(this, store->svc.datalog_rados, bucket_info, bs.shard_id);
   }
 
   return 0;
@@ -5228,13 +5239,8 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi
       return r;
     }
 
-    r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
-                                           target->bucket_info.layout.logs.back(),
-                                           bs->shard_id);
-    if (r < 0) {
-      ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
-      return r;
-    }
+    add_datalog_entry(dpp, store->svc.datalog_rados,
+                      target->bucket_info, bs->shard_id);
 
     return 0;
   }
@@ -6304,12 +6310,8 @@ int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64
 
   ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
 
-  int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
-                                             target->bucket_info.layout.logs.back(),
-                                             bs->shard_id);
-  if (r < 0) {
-    ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
-  }
+  add_datalog_entry(dpp, store->svc.datalog_rados,
+                    target->bucket_info, bs->shard_id);
 
   return ret;
 }
@@ -6333,12 +6335,8 @@ int RGWRados::Bucket::UpdateIndex::complete_del(const DoutPrefixProvider *dpp,
 
   ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace);
 
-  int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
-                                             target->bucket_info.layout.logs.back(),
-                                             bs->shard_id);
-  if (r < 0) {
-    ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
-  }
+  add_datalog_entry(dpp, store->svc.datalog_rados,
+                    target->bucket_info, bs->shard_id);
 
   return ret;
 }
@@ -6362,12 +6360,8 @@ int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp,
    * for following the specific bucket shard log. Otherwise they end up staying behind, and users
    * have no way to tell that they're all caught up
    */
-  int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
-                                             target->bucket_info.layout.logs.back(),
-                                             bs->shard_id);
-  if (r < 0) {
-    ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
-  }
+  add_datalog_entry(dpp, store->svc.datalog_rados,
+                    target->bucket_info, bs->shard_id);
 
   return ret;
 }
@@ -6996,12 +6990,7 @@ int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, RGWBucketInfo
     return r;
   }
 
-  r = svc.datalog_rados->add_entry(dpp, bucket_info,
-                                  bucket_info.layout.logs.back(),
-                                  bs.shard_id);
-  if (r < 0) {
-    ldpp_dout(dpp, 0) << "ERROR: failed writing data log" << dendl;
-  }
+  add_datalog_entry(dpp, svc.datalog_rados, bucket_info, bs.shard_id);
 
   return 0;
 }
diff --git a/src/rgw/rgw_reshard.cc b/src/rgw/rgw_reshard.cc
index cd8a246ffaedea33c4194f0b1b2cfd07327dfb11..ffc176c591e8fe5f219479580e46cfbab1d78f2e 100644
--- a/src/rgw/rgw_reshard.cc
+++ b/src/rgw/rgw_reshard.cc
@@ -484,7 +484,8 @@ static int commit_reshard(rgw::sal::RadosStore* store,
 {
   auto& layout = bucket_info.layout;
   auto prev = layout; // make a copy for cleanup
-  const auto next_log_gen = layout.logs.back().gen + 1;
+  const auto next_log_gen = layout.logs.empty() ? 1 :
+      layout.logs.back().gen + 1;
 
   bool remove_index = true;
 
@@ -536,15 +537,20 @@ static int commit_reshard(rgw::sal::RadosStore* store,
     return ret;
   }
 
-  if (store->svc()->zone->need_to_log_data()) {
+  if (store->svc()->zone->need_to_log_data() && !prev.logs.empty() &&
+      prev.current_index.layout.type == rgw::BucketIndexType::Normal) {
+    // write a datalog entry for each shard of the previous index. triggering
+    // sync on the old shards will force them to detect the end-of-log for that
+    // generation, and eventually transition to the next
+    // TODO: use a log layout to support types other than BucketLogType::InIndex
     for (uint32_t shard_id = 0; shard_id < prev.current_index.layout.normal.num_shards; ++shard_id) {
       ret = store->svc()->datalog_rados->add_entry(dpp, bucket_info, prev.logs.back(), shard_id);
       if (ret < 0) {
         ldpp_dout(dpp, 1) << "WARNING: failed writing data log (bucket_info.bucket="
         << bucket_info.bucket << ", shard_id=" << shard_id << "of generation="
         << prev.logs.back().gen << ")" << dendl;
-        }
-      }
+      } // datalog error is not fatal
+    }
   }
 
   // on success, delete index shard objects from the old layout (ignore errors)
diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc
index 9c6ec608b8d2f72dba498386439591a39a18ea0a..fe91a16de60bb97a4dad02f6b5fcd8dc9a80e489 100644
--- a/src/rgw/rgw_rest_log.cc
+++ b/src/rgw/rgw_rest_log.cc
@@ -421,6 +421,12 @@ void RGWOp_BILog_List::execute(optional_yield y) {
   }
 
   const auto& logs = bucket->get_info().layout.logs;
+  if (logs.empty()) {
+    ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
+    op_ret = -ENOENT;
+    return;
+  }
+
   auto log = std::prev(logs.end());
   if (gen) {
     log = std::find_if(logs.begin(), logs.end(), rgw::matches_gen(*gen));
@@ -541,9 +547,15 @@ void RGWOp_BILog_Info::execute(optional_yield y) {
     return;
   }
 
+  const auto& logs = bucket->get_info().layout.logs;
+  if (logs.empty()) {
+    ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
+    op_ret = -ENOENT;
+    return;
+  }
+
   map<RGWObjCategory, RGWStorageStats> stats;
-  const auto& latest_log = bucket->get_info().layout.logs.back();
-  const auto& index = log_to_index_layout(latest_log);
+  const auto& index = log_to_index_layout(logs.back());
 
   int ret =  bucket->read_stats(s, index, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
   if (ret < 0 && ret != -ENOENT) {
@@ -551,8 +563,8 @@ void RGWOp_BILog_Info::execute(optional_yield y) {
     return;
   }
 
-  oldest_gen = bucket->get_info().layout.logs.front().gen;
-  latest_gen = latest_log.gen;
+  oldest_gen = logs.front().gen;
+  latest_gen = logs.back().gen;
 }
 
 void RGWOp_BILog_Info::send_response() {
diff --git a/src/rgw/rgw_trim_bilog.cc b/src/rgw/rgw_trim_bilog.cc
index 287dd9addaf78a465efba8d4a970f349e5acc9ac..16263e0fd9b31af95fda6c3ac4c03be9541504da 100644
--- a/src/rgw/rgw_trim_bilog.cc
+++ b/src/rgw/rgw_trim_bilog.cc
@@ -640,6 +640,10 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
       return set_cr_error(-ENOENT);
     }
 
+    if (pbucket_info->layout.logs.empty()) {
+      return set_cr_done(); // no bilogs to trim
+    }
+
     // query peers for sync status
     set_status("fetching sync status from relevant peers");
     yield {
diff --git a/src/rgw/services/svc_bi_rados.cc b/src/rgw/services/svc_bi_rados.cc
index a4b89162630418fc0df331678e1956c77903db1d..ba622d915017c6df30c4e8366b70b93b34397114 100644
--- a/src/rgw/services/svc_bi_rados.cc
+++ b/src/rgw/services/svc_bi_rados.cc
@@ -472,30 +472,34 @@ int RGWSI_BucketIndex_RADOS::handle_overwrite(const DoutPrefixProvider *dpp,
   bool new_sync_enabled = info.datasync_flag_enabled();
   bool old_sync_enabled = orig_info.datasync_flag_enabled();
 
-  if (old_sync_enabled != new_sync_enabled) {
-    int shards_num = rgw::current_num_shards(info.layout);
-    int shard_id = info.layout.current_index.layout.normal.num_shards? 0 : -1;
-    const auto& log_layout = info.layout.logs.back();
-
-    int ret;
-    if (!new_sync_enabled) {
-      ret = svc.bilog->log_stop(dpp, info, log_layout, -1);
-    } else {
-      ret = svc.bilog->log_start(dpp, info, log_layout, -1);
-    }
-    if (ret < 0) {
-      ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
-      return ret;
-    }
+  if (old_sync_enabled == new_sync_enabled) {
+    return 0; // datasync flag didn't change
+  }
+  if (info.layout.logs.empty()) {
+    return 0; // no bilog
+  }
+  const auto& bilog = info.layout.logs.back();
+  if (bilog.layout.type != rgw::BucketLogType::InIndex) {
+    return -ENOTSUP;
+  }
+  const int shards_num = rgw::num_shards(bilog.layout.in_index);
 
-    for (int i = 0; i < shards_num; ++i, ++shard_id) {
-      ret = svc.datalog_rados->add_entry(dpp, info, info.layout.logs.back(),
-                                        shard_id);
-      if (ret < 0) {
-        ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl;
-        return ret;
-      }
-    }
+  int ret;
+  if (!new_sync_enabled) {
+    ret = svc.bilog->log_stop(dpp, info, bilog, -1);
+  } else {
+    ret = svc.bilog->log_start(dpp, info, bilog, -1);
+  }
+  if (ret < 0) {
+    ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
+    return ret;
+  }
+
+  for (int i = 0; i < shards_num; ++i) {
+    ret = svc.datalog_rados->add_entry(dpp, info, bilog, i);
+    if (ret < 0) {
+      ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << i << ")" << dendl;
+    } // datalog error is not fatal
   }
 
   return 0;