out << indented{width, "zone"} << zone.id << " (" << zone.name << ")\n";
out << indented{width, "bucket"} << info.bucket << "\n\n";
- if (!info.datasync_flag_enabled()) {
+ if (!info.bucket_datasync_enabled()) {
out << "Sync is disabled for bucket " << info.bucket.name << '\n';
return 0;
}
}
int RGWDataChangesLog::add_entry(const RGWBucketInfo& bucket_info, int shard_id) {
- if (!svc.zone->need_to_log_data() &&
- (!bucket_info.sync_policy || !bucket_info.sync_policy->zone_is_source(svc.zone->zone_id()))) {
+ if (!bucket_info.bucket_datasync_enabled(svc.zone)) {
return 0;
}
#include "rgw_crypt_sanitize.h"
#include "rgw_bucket_sync.h"
+#include "services/svc_zone.h"
+
#include <sstream>
#define dout_context g_ceph_context
return sync_policy->empty();
}
+
+bool RGWBucketInfo::bucket_is_sync_source(const string& zone_id) const
+{
+ return (sync_policy &&
+ sync_policy->zone_is_source(zone_id));
+}
+
+bool RGWBucketInfo::bucket_datasync_enabled(const RGWSI_Zone *zone_svc) const
+{
+ if (bucket_is_sync_source(zone_svc->zone_id())) {
+ return true;
+ }
+
+ return (zone_svc->need_to_log_data() &&
+ datasync_flag_enabled());
+}
}
};
+inline ostream& operator<<(ostream& out, const rgw_bucket_shard& bs) {
+ if (bs.shard_id <= 0) {
+ return out << bs.bucket;
+ }
+
+ return out << bs.bucket << ":" << bs.shard_id;
+}
+
struct rgw_bucket_placement {
rgw_placement_rule placement_rule;
rgw_bucket bucket;
}
struct RGWBucketSyncPolicy;
+class RGWSI_Zone;
struct RGWBucketInfo {
enum BIShardsHashType {
void set_sync_policy(RGWBucketSyncPolicy&& policy);
bool empty_sync_policy() const;
+ bool bucket_is_sync_source(const string& zone_id) const;
+ bool bucket_datasync_enabled(const RGWSI_Zone *zone_svc) const;
RGWBucketInfo();
~RGWBucketInfo();
return r;
}
- if (target->bucket_info.datasync_flag_enabled()) {
- r = store->data_log->add_entry(target->bucket_info, bs->shard_id);
- if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
- return r;
- }
+ r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id);
+ if (r < 0) {
+ lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ return r;
}
return 0;
ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
- if (target->bucket_info.datasync_flag_enabled()) {
- int r = store->data_log->add_entry(target->bucket_info, bs->shard_id);
- if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
- }
+ int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id);
+ if (r < 0) {
+ lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
}
return ret;
ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace);
- if (target->bucket_info.datasync_flag_enabled()) {
- int r = store->data_log->add_entry(target->bucket_info, bs->shard_id);
- if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
- }
+ int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id);
+ if (r < 0) {
+ lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
}
return ret;
* for following the specific bucket shard log. Otherwise they end up staying behind, and users
* have no way to tell that they're all caught up
*/
- if (target->bucket_info.datasync_flag_enabled()) {
- int r = store->data_log->add_entry(target->bucket_info, bs->shard_id);
- if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
- }
+ int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id);
+ if (r < 0) {
+ lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
}
return ret;
return r;
}
- if (log_data_change && bucket_info.datasync_flag_enabled()) {
- data_log->add_entry(bucket_info, bs.shard_id);
+ r = svc.datalog_rados->add_entry(bucket_info, bs.shard_id);
+ if (r < 0) {
+ ldout(cct, 0) << "ERROR: failed writing data log" << dendl;
}
return 0;
int RGWSI_BucketIndex_RADOS::handle_overwrite(const RGWBucketInfo& info,
const RGWBucketInfo& orig_info)
{
- if (orig_info.datasync_flag_enabled() != info.datasync_flag_enabled()) {
+ bool new_sync_enabled = info.bucket_datasync_enabled(svc.zone);
+ bool old_sync_enabled = orig_info.bucket_datasync_enabled(svc.zone);
+
+ if (old_sync_enabled != new_sync_enabled) {
int shards_num = info.num_shards? info.num_shards : 1;
int shard_id = info.num_shards? 0 : -1;
int ret;
- if (!info.datasync_flag_enabled()) {
+ if (!new_sync_enabled) {
ret = svc.bilog->log_stop(info, -1);
} else {
ret = svc.bilog->log_start(info, -1);
return *current_period;
}
-const string& RGWSI_Zone::get_current_period_id()
+const string& RGWSI_Zone::get_current_period_id() const
{
return current_period->get_id();
}
return zone_short_id;
}
-const string& RGWSI_Zone::zone_name()
+const string& RGWSI_Zone::zone_name() const
{
return get_zone_params().get_name();
}
-const string& RGWSI_Zone::zone_id()
+const string& RGWSI_Zone::zone_id() const
{
return get_zone_params().get_id();
}
int get_zonegroup(const string& id, RGWZoneGroup& zonegroup) const;
const RGWZone& get_zone() const;
- const string& zone_name();
- const string& zone_id();
+ const string& zone_name() const;
+ const string& zone_id() const;
uint32_t get_zone_short_id() const;
- const string& get_current_period_id();
+ const string& get_current_period_id() const;
bool has_zonegroup_api(const std::string& api) const;
bool zone_is_writeable();