return set_cr_done();
}
reenter(this) {
+ last_renew_try_time = ceph::coarse_mono_clock::now();
while (!going_down) {
yield call(new RGWSimpleRadosLockCR(async_rados, store, obj, lock_name, cookie, interval));
+ current_time = ceph::coarse_mono_clock::now();
+ if (current_time - last_renew_try_time > interval_tolerance) {
+ // renewal should happen between 50%-90% of interval
+ ldout(store->ctx(), 1) << *this << ": WARNING: did not renew lock " << obj << ":" << lock_name << ": within 90\% of interval. " <<
+ (current_time - last_renew_try_time) << " > " << interval_tolerance << dendl;
+ }
+ last_renew_try_time = current_time;
caller->set_sleeping(false); /* will only be relevant when we return, that's why we can do it early */
if (retcode < 0) {
ldout(store->ctx(), 20) << *this << ": couldn't lock " << obj << ":" << lock_name << ": retcode=" << retcode << dendl;
return set_state(RGWCoroutine_Error, retcode);
}
+ ldout(store->ctx(), 20) << *this << ": successfully locked " << obj << ":" << lock_name << dendl;
set_locked(true);
yield wait(utime_t(interval / 2, 0));
}
#include "common/Throttle.h"
#include <atomic>
+#include "common/ceph_time.h"
#include "services/svc_sys_obj.h"
#include "services/svc_bucket.h"
int interval;
bool going_down{ false };
bool locked{false};
+
+ const ceph::timespan interval_tolerance;
+ const ceph::timespan ts_interval;
RGWCoroutine *caller;
bool aborted{false};
+
+ ceph::coarse_mono_time last_renew_try_time;
+ ceph::coarse_mono_time current_time;
public:
RGWContinuousLeaseCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
: RGWCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
obj(_obj), lock_name(_lock_name),
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
- interval(_interval), caller(_caller)
+ interval(_interval), interval_tolerance(ceph::make_timespan(9*interval/10)), ts_interval(ceph::make_timespan(interval)),
+ caller(_caller)
{}
virtual ~RGWContinuousLeaseCR() override;
int operate(const DoutPrefixProvider *dpp) override;
bool is_locked() const {
+  // Consider the lease lost if more than one full lock interval has passed
+  // since the last renewal attempt: the server-side lock has presumably
+  // expired by then even though `locked` was never cleared locally.
+  // NOTE(review): last_renew_try_time is updated by operate() with no visible
+  // synchronization here — assumes both run on the same coroutine manager
+  // thread; confirm before calling is_locked() from elsewhere.
+  if (ceph::coarse_mono_clock::now() - last_renew_try_time > ts_interval) {
+  return false;
+  }
  return locked;
}
if (!lease_cr->is_locked()) {
lease_cr->go_down();
drain_all();
+ tn->log(1, "lease is lost, abort");
return set_cr_error(-ECANCELED);
}
omapvals = std::make_shared<RGWRadosGetOmapValsCR::Result>();
if (!lease_cr->is_locked()) {
lease_cr->go_down();
drain_all();
+ tn->log(1, "lease is lost, abort");
return set_cr_error(-ECANCELED);
}
current_modified.clear();
do {
if (lease_cr && !lease_cr->is_locked()) {
drain_all();
+ tn->log(1, "no lease or lease is lost, abort");
return set_cr_error(-ECANCELED);
}
set_status("listing remote bucket");
for (; entries_iter != list_result.entries.end(); ++entries_iter) {
if (lease_cr && !lease_cr->is_locked()) {
drain_all();
+ tn->log(1, "no lease or lease is lost, abort");
return set_cr_error(-ECANCELED);
}
tn->log(20, SSTR("[full sync] syncing object: "
});
tn->unset_flag(RGW_SNS_FLAG_ACTIVE);
if (lease_cr && !lease_cr->is_locked()) {
+ tn->log(1, "no lease or lease is lost, abort");
return set_cr_error(-ECANCELED);
}
yield call(marker_tracker.flush());
do {
if (lease_cr && !lease_cr->is_locked()) {
drain_all();
- tn->log(0, "ERROR: lease is not taken, abort");
+ tn->log(1, "no lease or lease is lost, abort");
return set_cr_error(-ECANCELED);
}
tn->log(20, SSTR("listing bilog for incremental sync; position=" << sync_info.inc_marker.position));
for (; entries_iter != entries_end; ++entries_iter) {
if (lease_cr && !lease_cr->is_locked()) {
drain_all();
+ tn->log(1, "no lease or lease is lost, abort");
return set_cr_error(-ECANCELED);
}
entry = &(*entries_iter);
yield spawn(bucket_lease_cr.get(), false);
while (!bucket_lease_cr->is_locked()) {
if (bucket_lease_cr->is_done()) {
- tn->log(5, "ERROR: failed to take bucket lease");
+ tn->log(5, "failed to take lease");
set_status("lease lock failed, early abort");
drain_all();
return set_cr_error(bucket_lease_cr->get_ret_status());
yield spawn(bucket_lease_cr.get(), false);
while (!bucket_lease_cr->is_locked()) {
if (bucket_lease_cr->is_done()) {
- tn->log(5, "ERROR: failed to take bucket lease");
+ tn->log(5, "failed to take lease");
set_status("lease lock failed, early abort");
drain_all();
return set_cr_error(bucket_lease_cr->get_ret_status());
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
- ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl;
+ ldpp_dout(dpp, 5) << "failed to take lease" << dendl;
set_status("lease lock failed, early abort");
return set_cr_error(lease_cr->get_ret_status());
}
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
- ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl;
- set_status("failed acquiring lock");
+ ldpp_dout(dpp, 5) << "failed to take lease" << dendl;
+ set_status("lease lock failed, early abort");
return set_cr_error(lease_cr->get_ret_status());
}
set_sleeping(true);
for (; iter != result.keys.end(); ++iter) {
if (!lease_cr->is_locked()) {
lost_lock = true;
+ tn->log(1, "lease is lost, abort");
break;
}
yield; // allow entries_index consumer to make progress
/* sync! */
do {
if (!lease_cr->is_locked()) {
- tn->log(10, "lost lease");
+ tn->log(1, "lease is lost, abort");
lost_lock = true;
break;
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
drain_all();
- tn->log(10, "failed to take lease");
+ tn->log(5, "failed to take lease");
return lease_cr->get_ret_status();
}
set_sleeping(true);
do {
if (!lease_cr->is_locked()) {
lost_lock = true;
- tn->log(10, "lost lease");
+ tn->log(1, "lease is lost, abort");
break;
}
#define INCREMENTAL_MAX_ENTRIES 100