From: Matt Benjamin
Date: Thu, 9 Apr 2020 19:31:51 +0000 (-0400)
Subject: rgwlc: re-hook RGWRados::process_lc()
X-Git-Tag: v15.2.5~56^2~11
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=2bfed865ae1e3ad6b18f154de8a7c1c741c8435c;p=ceph.git

rgwlc: re-hook RGWRados::process_lc()

Allow the admin command to force a (single threaded) lifecycle pass,
do not instantiate background threads to do this.

Fix an apparent shutdown hang (found in the process_lc() path, but
actually general).

Signed-off-by: Matt Benjamin
(cherry picked from commit 1051156c29b6a523350bb29523db0b386572e602)
---

diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc
index f18a45640e95..9bcf8f1a7edb 100644
--- a/src/rgw/rgw_lc.cc
+++ b/src/rgw/rgw_lc.cc
@@ -214,7 +214,7 @@ void *RGWLC::LCWorker::entry() {
     utime_t start = ceph_clock_now();
     if (should_work(start)) {
       ldpp_dout(dpp, 2) << "life cycle: start" << dendl;
-      int r = lc->process(this);
+      int r = lc->process(this, false /* once */);
       if (r < 0) {
         ldpp_dout(dpp, 0) << "ERROR: do life cycle process() returned error r="
                           << r << dendl;
@@ -1549,7 +1549,7 @@ static inline vector<int> random_sequence(uint32_t n)
   return v;
 }
 
-int RGWLC::process(LCWorker* worker)
+int RGWLC::process(LCWorker* worker, bool once = false)
 {
   int max_secs = cct->_conf->rgw_lc_lock_max_time;
 
@@ -1557,7 +1557,7 @@ int RGWLC::process(LCWorker* worker)
    * that might be running in parallel */
   vector<int> shard_seq = random_sequence(max_objs);
   for (auto index : shard_seq) {
-    int ret = process(index, max_secs, worker);
+    int ret = process(index, max_secs, worker, once);
     if (ret < 0)
       return ret;
   }
@@ -1591,7 +1591,8 @@ time_t RGWLC::thread_stop_at()
   return time(nullptr) + interval;
 }
 
-int RGWLC::process(int index, int max_lock_secs, LCWorker* worker)
+int RGWLC::process(int index, int max_lock_secs, LCWorker* worker,
+                   bool once = false)
 {
   dout(5) << "RGWLC::process(): ENTER: "
           << "index: " << index << " worker ix: " << worker->ix
@@ -1704,11 +1705,13 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker)
     l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
     ret = bucket_lc_process(entry.bucket, worker, thread_stop_at());
     bucket_lc_post(index, max_lock_secs, entry, ret, worker);
-  } while(1);
+  } while(1 && !once);
+
+  return 0;
 
 exit:
-    l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
-    return 0;
+  l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
+  return 0;
 }
 
 void RGWLC::start_processor()
@@ -1812,7 +1815,6 @@ int RGWLC::LCWorker::schedule_next_start_time(utime_t &start, utime_t& now)
 
 RGWLC::LCWorker::~LCWorker()
 {
-  workpool->drain();
   delete workpool;
 } /* ~LCWorker */
 
diff --git a/src/rgw/rgw_lc.h b/src/rgw/rgw_lc.h
index fd8b565f2ae6..9aa64f65b07f 100644
--- a/src/rgw/rgw_lc.h
+++ b/src/rgw/rgw_lc.h
@@ -500,8 +500,8 @@ public:
 
   void initialize(CephContext *_cct, rgw::sal::RGWRadosStore *_store);
   void finalize();
-  int process(LCWorker* worker);
-  int process(int index, int max_secs, LCWorker* worker);
+  int process(LCWorker* worker, bool once);
+  int process(int index, int max_secs, LCWorker* worker, bool once);
   bool if_already_run_today(time_t start_date);
   bool expired_session(time_t started);
   time_t thread_stop_at();
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 1623c12f9efc..d193e1c8e34f 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -8043,7 +8043,12 @@ int RGWRados::list_lc_progress(const string& marker, uint32_t max_entries,
 
 int RGWRados::process_lc()
 {
-  return lc->process(nullptr);
+  RGWLC lc;
+  lc.initialize(cct, this->store);
+  RGWLC::LCWorker worker(&lc, cct, &lc, 0);
+  auto ret = lc.process(&worker, true /* once */);
+  lc.stop_processor(); // sets down_flag, but returns immediately
+  return ret;
 }
 
 bool RGWRados::process_expire_objects()
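
For readers following the control flow: the admin command referenced in the commit
message (radosgw-admin lc process) lands in the patched RGWRados::process_lc(), which
now builds a transient RGWLC plus a single LCWorker and runs one synchronous pass with
once == true, instead of driving the background worker threads. The self-contained
sketch below models only that "once" pattern under stated assumptions; ShardProcessor,
process_shard(), the shard count, and the fixed shard order are illustrative stand-ins,
not RGW code.

#include <iostream>

// Minimal model of the "once" flag this patch threads through
// RGWLC::process(): one routine serves both the background worker
// (loop until shutdown) and an admin-triggered single pass.
struct ShardProcessor {
  bool down_flag = false;   // analogue of the flag set by stop_processor()

  int process_shard(int index, bool once) {
    int passes = 0;
    do {
      // ... take the shard lock, pop one bucket entry, run expiration ...
      ++passes;
      if (down_flag)
        break;              // shutdown requested
    } while (!once);        // once == true: exactly one iteration
    std::cout << "shard " << index << ": " << passes << " pass(es)\n";
    return 0;
  }

  int process(bool once) {
    // The real code visits shards in a shuffled order (random_sequence());
    // a fixed permutation stands in for that here.
    for (int index : {2, 0, 1}) {
      int ret = process_shard(index, once);
      if (ret < 0)
        return ret;
    }
    return 0;
  }
};

int main() {
  ShardProcessor lc;
  // Admin path: one synchronous pass over every shard, no worker threads.
  // The background worker path would call lc.process(false) instead and
  // rely on down_flag being set at shutdown.
  return lc.process(true /* once */);
}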