 {
   int ret;
-  if (need_watch_notify()) {
-    ret = init_watch();
-    if (ret < 0) {
-      lderr(cct) << "ERROR: failed to initialize watch" << dendl;
-      return ret;
-    }
-  }
-
   ret = region.init(cct, this);
   if (ret < 0)
     return ret;
     }
   }
+  if (need_watch_notify()) {
+    ret = init_watch();
+    if (ret < 0) {
+      lderr(cct) << "ERROR: failed to initialize watch" << dendl;
+      return ret;
+    }
+  }
+
   map<string, RGWZone>::iterator ziter;
   for (ziter = region.zones.begin(); ziter != region.zones.end(); ++ziter) {
     const string& name = ziter->first;
       return r;
   }
+  watch_initialized = true;
+
   return 0;
 }
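
To make the new control flow easy to follow outside the diff, here is a minimal compilable sketch of the ordering this hunk establishes (`StoreSketch`, `init_region()`, and the stub bodies are hypothetical stand-ins, not the actual RGWRados code): the watch is registered only after the region/zone configuration step, and `watch_initialized` is flipped only once `init_watch()` succeeds.

```cpp
// Minimal sketch of the ordering established above. StoreSketch,
// init_region(), and the stub bodies are hypothetical stand-ins,
// not the real RGWRados members.
#include <iostream>

struct StoreSketch {
  bool watch_initialized = false;  // mirrors the new RGWRados flag

  int init_region() { return 0; }  // stands in for region.init(cct, this)
  int init_watch()  { return 0; }  // stands in for RGWRados::init_watch()

  int init_complete() {
    // Region/zone config runs first; it may write system objects, which
    // would reach distribute() before any watch exists.
    int ret = init_region();
    if (ret < 0)
      return ret;

    ret = init_watch();
    if (ret < 0) {
      std::cerr << "ERROR: failed to initialize watch" << std::endl;
      return ret;
    }

    // Only after init_watch() succeeds may distribute() start notifying.
    watch_initialized = true;
    return 0;
  }
};

int main() {
  StoreSketch s;
  return s.init_complete();
}
```
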
 int RGWRados::distribute(const string& key, bufferlist& bl)
 {
+  /*
+   * We were called before the watch was initialized. This can only happen if we're updating some
+   * system config object (e.g., zone info) during init. Don't try to distribute the cache info for
+   * these objects; they're currently only read at startup anyway.
+   */
+  if (!watch_initialized)
+    return 0;
+
   string notify_oid;
   pick_control_oid(key, notify_oid);
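
And the same flag seen from the caller's side, again as a sketch with made-up names rather than the real RGW types: a `distribute()` issued while system config objects are still being written during init is silently dropped, while one issued after initialization fans out normally.

```cpp
// Sketch of the guard from the caller's side; CacheSketch and its
// members are hypothetical, not the real RGW types.
#include <iostream>
#include <string>

struct CacheSketch {
  bool watch_initialized = false;

  int distribute(const std::string& key) {
    if (!watch_initialized)  // called during init: nothing is watching yet
      return 0;              // silently skip the notification
    std::cout << "notifying watchers about " << key << "\n";
    return 0;
  }
};

int main() {
  CacheSketch c;
  c.distribute("zone_info");     // dropped: issued before watch init
  c.watch_initialized = true;    // what init flips at the very end
  c.distribute("mybucket.meta"); // delivered normally
  return 0;
}
```

Returning 0 rather than an error is what makes the early calls harmless: per the comment in the patch, the config objects involved are only read at startup, so a missed cache notification costs nothing.
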
   uint64_t *watch_handles;
   librados::IoCtx root_pool_ctx;      // .rgw
   librados::IoCtx control_pool_ctx;   // .rgw.control
+  bool watch_initialized;
   Mutex bucket_id_lock;
   uint64_t max_bucket_id;
   RGWRados() : lock("rados_timer_lock"), timer(NULL),
                gc(NULL), use_gc_thread(false),
                num_watchers(0), watchers(NULL), watch_handles(NULL),
+               watch_initialized(false),
                bucket_id_lock("rados_bucket_id"), max_bucket_id(0),
                cct(NULL), rados(NULL),
                pools_initialized(false),