shard_status.resize(full_status.shards_done_with_gen.size());
} else if (r == -ENOENT) {
// no full status, but there may be per-shard status from before upgrade
- const auto& log = source_bucket->get_info().layout.logs.front();
+ const auto& logs = source_bucket->get_info().layout.logs;
+ if (logs.empty()) {
+ out << indented{width} << "init: bucket sync has not started\n";
+ return 0;
+ }
+ const auto& log = logs.front();
if (log.gen > 0) {
// this isn't the backward-compatible case, so we just haven't started yet
out << indented{width} << "init: bucket sync has not started\n";
return -EINVAL;
}
// use shard count from our log gen=0
- shard_status.resize(log.layout.in_index.layout.num_shards);
+ shard_status.resize(rgw::num_shards(log.layout.in_index));
} else {
lderr(store->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
return r;
if (check_compat) {
// use shard count from our log gen=0
// try to convert existing per-shard incremental status for backward compatibility
- if (source_info.layout.logs.front().gen > 0) {
+ if (source_info.layout.logs.empty() ||
+ source_info.layout.logs.front().gen > 0) {
ldpp_dout(dpp, 20) << "no generation zero when checking compatibility" << dendl;
no_zero = true;
- }
- if (auto& log = source_info.layout.logs.front();
- log.layout.type != rgw::BucketLogType::InIndex) {
+ } else if (auto& log = source_info.layout.logs.front();
+ log.layout.type != rgw::BucketLogType::InIndex) {
ldpp_dout(dpp, 20) << "unrecognized log layout type when checking compatibility " << log.layout.type << dendl;
no_zero = true;
}
return get_max_chunk_size(pool, max_chunk_size, dpp, palignment);
}
+// Record a data-changes-log entry for one shard of the given bucket, using
+// the bucket's most recent log layout generation (logs.back()).
+// Silently does nothing when the bucket has no log layouts at all, which
+// avoids undefined behavior from calling logs.back() on an empty list.
+// A failed datalog write is only logged, never propagated: datalog errors
+// are not fatal to the calling operation (see the call sites, which all
+// ignore the outcome and return their own status).
+void add_datalog_entry(const DoutPrefixProvider* dpp,
+                       RGWDataChangesLog* datalog,
+                       const RGWBucketInfo& bucket_info,
+                       uint32_t shard_id)
+{
+  const auto& logs = bucket_info.layout.logs;
+  // nothing to log against if no log generation exists yet
+  if (logs.empty()) {
+    return;
+  }
+  int r = datalog->add_entry(dpp, bucket_info, logs.back(), shard_id);
+  if (r < 0) {
+    ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
+  } // datalog error is not fatal
+}
+
class RGWIndexCompletionManager;
struct complete_op_data {
/* ignoring error, can't do anything about it */
continue;
}
- r = store->svc.datalog_rados->add_entry(this, bucket_info,
- bucket_info.layout.logs.back(),
- bs.shard_id);
- if (r < 0) {
- ldpp_dout(this, -1) << "ERROR: failed writing data log" << dendl;
- }
+
+ add_datalog_entry(this, store->svc.datalog_rados, bucket_info, bs.shard_id);
}
return 0;
return r;
}
- r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
- target->bucket_info.layout.logs.back(),
- bs->shard_id);
- if (r < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
- return r;
- }
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->bucket_info, bs->shard_id);
return 0;
}
ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
- int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
- target->bucket_info.layout.logs.back(),
- bs->shard_id);
- if (r < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
- }
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->bucket_info, bs->shard_id);
return ret;
}
ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace);
- int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
- target->bucket_info.layout.logs.back(),
- bs->shard_id);
- if (r < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
- }
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->bucket_info, bs->shard_id);
return ret;
}
* for following the specific bucket shard log. Otherwise they end up staying behind, and users
* have no way to tell that they're all caught up
*/
- int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info,
- target->bucket_info.layout.logs.back(),
- bs->shard_id);
- if (r < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
- }
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->bucket_info, bs->shard_id);
return ret;
}
return r;
}
- r = svc.datalog_rados->add_entry(dpp, bucket_info,
- bucket_info.layout.logs.back(),
- bs.shard_id);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed writing data log" << dendl;
- }
+ add_datalog_entry(dpp, svc.datalog_rados, bucket_info, bs.shard_id);
return 0;
}
{
auto& layout = bucket_info.layout;
auto prev = layout; // make a copy for cleanup
- const auto next_log_gen = layout.logs.back().gen + 1;
+ const auto next_log_gen = layout.logs.empty() ? 1 :
+ layout.logs.back().gen + 1;
bool remove_index = true;
return ret;
}
- if (store->svc()->zone->need_to_log_data()) {
+ if (store->svc()->zone->need_to_log_data() && !prev.logs.empty() &&
+ prev.current_index.layout.type == rgw::BucketIndexType::Normal) {
+ // write a datalog entry for each shard of the previous index. triggering
+ // sync on the old shards will force them to detect the end-of-log for that
+ // generation, and eventually transition to the next
+ // TODO: use a log layout to support types other than BucketLogType::InIndex
for (uint32_t shard_id = 0; shard_id < prev.current_index.layout.normal.num_shards; ++shard_id) {
ret = store->svc()->datalog_rados->add_entry(dpp, bucket_info, prev.logs.back(), shard_id);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed writing data log (bucket_info.bucket="
<< bucket_info.bucket << ", shard_id=" << shard_id << "of generation="
<< prev.logs.back().gen << ")" << dendl;
- }
- }
+ } // datalog error is not fatal
+ }
}
// on success, delete index shard objects from the old layout (ignore errors)
}
const auto& logs = bucket->get_info().layout.logs;
+ if (logs.empty()) {
+ ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
+ op_ret = -ENOENT;
+ return;
+ }
+
auto log = std::prev(logs.end());
if (gen) {
log = std::find_if(logs.begin(), logs.end(), rgw::matches_gen(*gen));
return;
}
+ const auto& logs = bucket->get_info().layout.logs;
+ if (logs.empty()) {
+ ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
+ op_ret = -ENOENT;
+ return;
+ }
+
map<RGWObjCategory, RGWStorageStats> stats;
- const auto& latest_log = bucket->get_info().layout.logs.back();
- const auto& index = log_to_index_layout(latest_log);
+ const auto& index = log_to_index_layout(logs.back());
int ret = bucket->read_stats(s, index, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
if (ret < 0 && ret != -ENOENT) {
return;
}
- oldest_gen = bucket->get_info().layout.logs.front().gen;
- latest_gen = latest_log.gen;
+ oldest_gen = logs.front().gen;
+ latest_gen = logs.back().gen;
}
void RGWOp_BILog_Info::send_response() {
return set_cr_error(-ENOENT);
}
+ if (pbucket_info->layout.logs.empty()) {
+ return set_cr_done(); // no bilogs to trim
+ }
+
// query peers for sync status
set_status("fetching sync status from relevant peers");
yield {
bool new_sync_enabled = info.datasync_flag_enabled();
bool old_sync_enabled = orig_info.datasync_flag_enabled();
- if (old_sync_enabled != new_sync_enabled) {
- int shards_num = rgw::current_num_shards(info.layout);
- int shard_id = info.layout.current_index.layout.normal.num_shards? 0 : -1;
- const auto& log_layout = info.layout.logs.back();
-
- int ret;
- if (!new_sync_enabled) {
- ret = svc.bilog->log_stop(dpp, info, log_layout, -1);
- } else {
- ret = svc.bilog->log_start(dpp, info, log_layout, -1);
- }
- if (ret < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
- return ret;
- }
+ if (old_sync_enabled == new_sync_enabled) {
+ return 0; // datasync flag didn't change
+ }
+ if (info.layout.logs.empty()) {
+ return 0; // no bilog
+ }
+ const auto& bilog = info.layout.logs.back();
+ if (bilog.layout.type != rgw::BucketLogType::InIndex) {
+ return -ENOTSUP;
+ }
+ const int shards_num = rgw::num_shards(bilog.layout.in_index);
- for (int i = 0; i < shards_num; ++i, ++shard_id) {
- ret = svc.datalog_rados->add_entry(dpp, info, info.layout.logs.back(),
- shard_id);
- if (ret < 0) {
- ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl;
- return ret;
- }
- }
+ int ret;
+ if (!new_sync_enabled) {
+ ret = svc.bilog->log_stop(dpp, info, bilog, -1);
+ } else {
+ ret = svc.bilog->log_start(dpp, info, bilog, -1);
+ }
+ if (ret < 0) {
+ ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
+ return ret;
+ }
+
+ for (int i = 0; i < shards_num; ++i) {
+ ret = svc.datalog_rados->add_entry(dpp, info, bilog, i);
+ if (ret < 0) {
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << i << ")" << dendl;
+ } // datalog error is not fatal
}
return 0;