From: Sage Weil
Date: Tue, 16 Oct 2018 19:07:33 +0000 (-0500)
Subject: mds: Mutex::Locker -> std::lock_guard
X-Git-Tag: v14.1.0~1175^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=b023007be7f09a0dd03604639ddcea5f45c9bf71;p=ceph.git

mds: Mutex::Locker -> std::lock_guard

Signed-off-by: Sage Weil
---

diff --git a/src/mds/MDLog.cc b/src/mds/MDLog.cc
index 5990693e6e54..ee2d36bfff25 100644
--- a/src/mds/MDLog.cc
+++ b/src/mds/MDLog.cc
@@ -476,7 +476,7 @@ void MDLog::flush()
 
 void MDLog::kick_submitter()
 {
-  Mutex::Locker l(submit_mutex);
+  std::lock_guard l(submit_mutex);
   submit_cond.Signal();
 }
@@ -978,7 +978,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
   if (mds->is_standby_replay()) {
     dout(1) << "Journal " << jp.front << " is being rewritten, "
       << "cannot replay in standby until an active MDS completes rewrite" << dendl;
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     if (mds->is_daemon_stopping()) {
       return;
     }
@@ -1033,7 +1033,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
 
   // Assign to ::journaler so that we can be aborted by ::shutdown while
   // waiting for journaler recovery
   {
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     journaler = front_journal;
   }
@@ -1059,7 +1059,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
     dout(0) << "Journal " << jp.front << " is in unknown format "
             << front_journal->get_stream_format() << ", does this MDS daemon require upgrade?" << dendl;
     {
-      Mutex::Locker l(mds->mds_lock);
+      std::lock_guard l(mds->mds_lock);
       if (mds->is_daemon_stopping()) {
         journaler = NULL;
         delete front_journal;
@@ -1074,7 +1074,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion)
     dout(4) << "Recovered journal " << jp.front << " in format " << front_journal->get_stream_format() << dendl;
     journaler->set_write_error_handler(new C_MDL_WriteError(this));
     {
-      Mutex::Locker l(mds->mds_lock);
+      std::lock_guard l(mds->mds_lock);
       if (mds->is_daemon_stopping()) {
         return;
       }
@@ -1252,7 +1252,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa
   int erase_result = erase_waiter.wait();
   ceph_assert(erase_result == 0);
   {
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     if (mds->is_daemon_stopping()) {
       delete new_journal;
       return;
@@ -1270,7 +1270,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa
   /* Reset the Journaler object to its default state */
   dout(1) << "Journal rewrite complete, continuing with normal startup" << dendl;
   {
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     if (mds->is_daemon_stopping()) {
       delete new_journal;
       return;
@@ -1282,7 +1282,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa
 
   /* Trigger completion */
   {
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     if (mds->is_daemon_stopping()) {
       return;
     }
@@ -1429,7 +1429,7 @@ void MDLog::_replay_thread()
     num_events++;
 
     {
-      Mutex::Locker l(mds->mds_lock);
+      std::lock_guard l(mds->mds_lock);
       if (mds->is_daemon_stopping()) {
         return;
       }
@@ -1455,7 +1455,7 @@ void MDLog::_replay_thread()
 
   dout(10) << "_replay_thread kicking waiters" << dendl;
   {
-    Mutex::Locker l(mds->mds_lock);
+    std::lock_guard l(mds->mds_lock);
     if (mds->is_daemon_stopping()) {
       return;
     }
diff --git a/src/mds/MDLog.h b/src/mds/MDLog.h
index 5fa77a43e070..6e37e9c7845c 100644
--- a/src/mds/MDLog.h
+++ b/src/mds/MDLog.h
@@ -142,7 +142,7 @@ protected:
   void set_safe_pos(uint64_t pos)
   {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     ceph_assert(pos >= safe_pos);
     safe_pos = pos;
   }
@@ -218,11 +218,11 @@ private:
   void _journal_segment_subtree_map(MDSInternalContextBase *onsync);
 public:
   void start_new_segment() {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     _start_new_segment();
   }
   void prepare_new_segment() {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     _prepare_new_segment();
   }
   void journal_segment_subtree_map(MDSInternalContextBase *onsync=NULL) {
@@ -275,18 +275,18 @@ private:
 public:
   void _start_entry(LogEvent *e);
   void start_entry(LogEvent *e) {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     _start_entry(e);
   }
   void cancel_entry(LogEvent *e);
   void _submit_entry(LogEvent *e, MDSLogContextBase *c);
   void submit_entry(LogEvent *e, MDSLogContextBase *c = 0) {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     _submit_entry(e, c);
     submit_cond.Signal();
   }
   void start_submit_entry(LogEvent *e, MDSLogContextBase *c = 0) {
-    Mutex::Locker l(submit_mutex);
+    std::lock_guard l(submit_mutex);
     _start_entry(e);
     _submit_entry(e, c);
     submit_cond.Signal();
diff --git a/src/mds/MDSContext.cc b/src/mds/MDSContext.cc
index b6eb2750bbde..d2b0b9bbbeca 100644
--- a/src/mds/MDSContext.cc
+++ b/src/mds/MDSContext.cc
@@ -101,7 +101,7 @@ void MDSIOContextBase::complete(int r) {
   dout(10) << "MDSIOContextBase::complete: " << typeid(*this).name() << dendl;
   ceph_assert(mds != NULL);
-  Mutex::Locker l(mds->mds_lock);
+  std::lock_guard l(mds->mds_lock);
 
   if (mds->is_daemon_stopping()) {
     dout(4) << "MDSIOContextBase::complete: dropping for stopping "
diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc
index 8f87301761a2..c60708d3a7ec 100644
--- a/src/mds/MDSDaemon.cc
+++ b/src/mds/MDSDaemon.cc
@@ -95,7 +95,7 @@ MDSDaemon::MDSDaemon(std::string_view n, Messenger *m, MonClient *mc) :
 }
 
 MDSDaemon::~MDSDaemon() {
-  Mutex::Locker lock(mds_lock);
+  std::lock_guard lock(mds_lock);
 
   delete mds_rank;
   mds_rank = NULL;
@@ -162,7 +162,7 @@ void MDSDaemon::dump_status(Formatter *f)
   f->dump_string("state", ceph_mds_state_name(mdsmap->get_state_gid(mds_gid_t(
           monc->get_global_id()))));
   if (mds_rank) {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mds_rank->dump_status(f);
   }
@@ -1044,7 +1044,7 @@ void MDSDaemon::handle_signal(int signum)
   ceph_assert(signum == SIGINT || signum == SIGTERM);
   derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
   {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     if (stopping) {
       return;
     }
@@ -1153,7 +1153,7 @@ void MDSDaemon::respawn()
 
 bool MDSDaemon::ms_dispatch2(const Message::ref &m)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   if (stopping) {
     return false;
   }
@@ -1245,7 +1245,7 @@ bool MDSDaemon::ms_handle_reset(Connection *con)
   if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
     return false;
 
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   if (stopping) {
     return false;
   }
@@ -1272,7 +1272,7 @@ void MDSDaemon::ms_handle_remote_reset(Connection *con)
   if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
     return;
 
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   if (stopping) {
     return;
   }
@@ -1387,7 +1387,7 @@ int MDSDaemon::ms_handle_authentication(Connection *con)
 
 void MDSDaemon::ms_handle_accept(Connection *con)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   if (stopping) {
     return;
   }
diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc
index 733a2d785e4a..728b3301cec8 100644
--- a/src/mds/MDSRank.cc
+++ b/src/mds/MDSRank.cc
@@ -180,7 +180,7 @@ private:
     dout(20) << __func__ << dendl;
 
     Context *ctx = new C_OnFinisher(new FunctionContext([this](int _) {
-          Mutex::Locker locker(mds->mds_lock);
+          std::lock_guard locker(mds->mds_lock);
           trim_expired_segments();
         }), mds->finisher);
     ctx->complete(0);
@@ -205,7 +205,7 @@ private:
     dout(20) << __func__ << dendl;
 
     Context *ctx = new FunctionContext([this](int r) {
-        Mutex::Locker locker(mds->mds_lock);
+        std::lock_guard locker(mds->mds_lock);
        handle_write_head(r);
       });
     // Flush the journal header so that readers will start from after
@@ -286,7 +286,7 @@ private:
   void finish(int r) override {
     Context *ctx = nullptr;
     {
-      Mutex::Locker locker(lock);
+      std::lock_guard locker(lock);
       std::swap(on_finish, ctx);
     }
     if (ctx != nullptr) {
@@ -897,7 +897,7 @@ void MDSRank::damaged()
 
 void MDSRank::damaged_unlocked()
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   damaged();
 }
@@ -923,7 +923,7 @@ void MDSRank::handle_write_error(int err)
 
 void *MDSRank::ProgressThread::entry()
 {
-  Mutex::Locker l(mds->mds_lock);
+  std::lock_guard l(mds->mds_lock);
   while (true) {
     while (!mds->stopping &&
            mds->finished_queue.empty() &&
@@ -2412,7 +2412,7 @@ bool MDSRankDispatcher::handle_asok_command(std::string_view command,
       cond.wait();
     }
   } else if (command == "session ls") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
 
     heartbeat_reset();
@@ -2467,7 +2467,7 @@ bool MDSRankDispatcher::handle_asok_command(std::string_view command,
     }
     command_export_dir(f, path, (mds_rank_t)rank);
   } else if (command == "dump cache") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     string path;
     int r;
     if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
@@ -2481,7 +2481,7 @@ bool MDSRankDispatcher::handle_asok_command(std::string_view command,
       f->reset();
     }
   } else if (command == "cache status") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mdcache->cache_status(f);
   } else if (command == "cache drop") {
     int64_t timeout;
@@ -2498,14 +2498,14 @@ bool MDSRankDispatcher::handle_asok_command(std::string_view command,
   } else if (command == "dump tree") {
     command_dump_tree(cmdmap, ss, f);
   } else if (command == "dump loads") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     int r = balancer->dump_loads(f);
     if (r != 0) {
       ss << "Failed to dump loads: " << cpp_strerror(r);
       f->reset();
     }
   } else if (command == "dump snaps") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     string server;
     cmd_getval(g_ceph_context, cmdmap, "server", server);
     if (server == "--server") {
@@ -2522,7 +2522,7 @@ bool MDSRankDispatcher::handle_asok_command(std::string_view command,
       }
     }
   } else if (command == "force_readonly") {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mdcache->force_readonly();
   } else if (command == "dirfrag split") {
     command_dirfrag_split(cmdmap, ss);
@@ -2664,7 +2664,7 @@ void MDSRank::command_scrub_path(Formatter *f, std::string_view path, vector
   C_SaferCond scond;
   {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mdcache->enqueue_scrub(path, "", force, recursive, repair, f, &scond);
   }
   scond.wait();
@@ -2676,7 +2676,7 @@ void MDSRank::command_tag_path(Formatter *f,
 {
   C_SaferCond scond;
   {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond);
   }
   scond.wait();
@@ -2686,7 +2686,7 @@ void MDSRank::command_flush_path(Formatter *f, std::string_view path)
 {
   C_SaferCond scond;
   {
-    Mutex::Locker l(mds_lock);
+    std::lock_guard l(mds_lock);
     mdcache->flush_dentry(path, &scond);
   }
   int r = scond.wait();
@@ -2703,7 +2703,7 @@ void MDSRank::command_flush_journal(Formatter *f) {
   C_SaferCond cond;
   std::stringstream ss;
   {
-    Mutex::Locker locker(mds_lock);
+    std::lock_guard locker(mds_lock);
     C_Flush_Journal *flush_journal = new C_Flush_Journal(mdcache, mdlog, this, &ss, &cond);
     flush_journal->send();
   }
@@ -2718,7 +2718,7 @@ void MDSRank::command_flush_journal(Formatter *f) {
 void MDSRank::command_get_subtrees(Formatter *f)
 {
   ceph_assert(f != NULL);
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
 
   std::list<CDir*> subtrees;
   mdcache->list_subtrees(subtrees);
@@ -2757,7 +2757,7 @@ int MDSRank::_command_export_dir(
     std::string_view path,
     mds_rank_t target)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   filepath fp(path);
 
   if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
@@ -2787,7 +2787,7 @@ void MDSRank::command_dump_tree(const cmdmap_t &cmdmap, std::ostream &ss, Format
   cmd_getval(g_ceph_context, cmdmap, "root", root);
   if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
     depth = -1;
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   CInode *in = mdcache->cache_traverse(filepath(root.c_str()));
   if (!in) {
     ss << "root inode is not in cache";
@@ -2851,7 +2851,7 @@ bool MDSRank::command_dirfrag_split(
     cmdmap_t cmdmap,
     std::ostream &ss)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   int64_t by = 0;
   if (!cmd_getval(g_ceph_context, cmdmap, "bits", by)) {
     ss << "missing bits argument";
@@ -2877,7 +2877,7 @@ bool MDSRank::command_dirfrag_merge(
     cmdmap_t cmdmap,
     std::ostream &ss)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   std::string path;
   bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
   if (!got) {
@@ -2913,7 +2913,7 @@ bool MDSRank::command_dirfrag_ls(
     std::ostream &ss,
     Formatter *f)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   std::string path;
   bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
   if (!got) {
@@ -2949,13 +2949,13 @@ bool MDSRank::command_dirfrag_ls(
 
 void MDSRank::command_openfiles_ls(Formatter *f)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   mdcache->dump_openfiles(f);
 }
 
 void MDSRank::command_dump_inode(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss)
 {
-  Mutex::Locker l(mds_lock);
+  std::lock_guard l(mds_lock);
   int64_t number;
   bool got = cmd_getval(g_ceph_context, cmdmap, "number", number);
   if (!got) {
@@ -3230,7 +3230,7 @@ bool MDSRank::evict_client(int64_t session_id,
     objecter->wait_for_latest_osdmap(
      new C_OnFinisher(
        new FunctionContext([this, fn](int r) {
-          Mutex::Locker l(mds_lock);
+          std::lock_guard l(mds_lock);
           auto epoch = objecter->with_osdmap([](const OSDMap &o){
               return o.get_epoch();
           });
@@ -3408,7 +3408,7 @@ void MDSRank::cache_drop_send_reply(Formatter *f, C_MDS_Send_Command_Reply *repl
 void MDSRank::command_cache_drop(uint64_t timeout, Formatter *f, Context *on_finish) {
   dout(20) << __func__ << dendl;
 
-  Mutex::Locker locker(mds_lock);
+  std::lock_guard locker(mds_lock);
   C_Drop_Cache *request = new C_Drop_Cache(server, mdcache, mdlog, this,
                                            timeout, f, on_finish);
   request->send();
diff --git a/src/mds/Mutation.cc b/src/mds/Mutation.cc
index becb040bb5d3..78f0e8cbb712 100644
--- a/src/mds/Mutation.cc
+++ b/src/mds/Mutation.cc
@@ -443,7 +443,7 @@ void MDRequestImpl::_dump(Formatter *f) const
   }
   {
     f->open_array_section("events");
-    Mutex::Locker l(lock);
+    std::lock_guard l(lock);
     for (auto& i : events) {
       f->dump_object("event", i);
     }
diff --git a/src/mds/PurgeQueue.cc b/src/mds/PurgeQueue.cc
index 5e2df73c489d..31b1c09777ff 100644
--- a/src/mds/PurgeQueue.cc
+++ b/src/mds/PurgeQueue.cc
@@ -127,7 +127,7 @@ void PurgeQueue::create_logger()
 
 void PurgeQueue::init()
 {
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   ceph_assert(logger != nullptr);
@@ -137,14 +137,14 @@ void PurgeQueue::init()
 
 void PurgeQueue::activate()
 {
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
   if (journaler.get_read_pos() == journaler.get_write_pos())
     return;
 
   if (in_flight.empty()) {
     dout(4) << "start work (by drain)" << dendl;
     finisher.queue(new FunctionContext([this](int r) {
-          Mutex::Locker l(lock);
+          std::lock_guard l(lock);
           _consume();
         }));
   }
@@ -152,7 +152,7 @@ void PurgeQueue::activate()
 
 void PurgeQueue::shutdown()
 {
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   journaler.shutdown();
   timer.shutdown();
@@ -163,7 +163,7 @@ void PurgeQueue::open(Context *completion)
 {
   dout(4) << "opening" << dendl;
 
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   if (completion)
     waiting_for_recovery.push_back(completion);
@@ -174,7 +174,7 @@ void PurgeQueue::open(Context *completion)
         "creating it." << dendl;
       create(NULL);
     } else if (r == 0) {
-      Mutex::Locker l(lock);
+      std::lock_guard l(lock);
       dout(4) << "open complete" << dendl;
 
       // Journaler only guarantees entries before head write_pos have been
@@ -199,7 +199,7 @@ void PurgeQueue::open(Context *completion)
 
 void PurgeQueue::wait_for_recovery(Context* c)
 {
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
   if (recovered)
     c->complete(0);
   else
@@ -216,7 +216,7 @@ void PurgeQueue::_recover()
       !journaler.get_error() &&
       journaler.get_read_pos() < journaler.get_write_pos()) {
     journaler.wait_for_readable(new FunctionContext([this](int r) {
-      Mutex::Locker l(lock);
+      std::lock_guard l(lock);
       _recover();
     }));
     return;
@@ -248,7 +248,7 @@ void PurgeQueue::_recover()
 void PurgeQueue::create(Context *fin)
 {
   dout(4) << "creating" << dendl;
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   if (fin)
     waiting_for_recovery.push_back(fin);
@@ -258,7 +258,7 @@ void PurgeQueue::create(Context *fin)
   journaler.set_writeable();
   journaler.create(&layout, JOURNAL_FORMAT_RESILIENT);
   journaler.write_head(new FunctionContext([this](int r) {
-        Mutex::Locker l(lock);
+        std::lock_guard l(lock);
         recovered = true;
         finish_contexts(g_ceph_context, waiting_for_recovery);
       }));
@@ -270,7 +270,7 @@ void PurgeQueue::create(Context *fin)
 void PurgeQueue::push(const PurgeItem &pi, Context *completion)
 {
   dout(4) << "pushing inode " << pi.ino << dendl;
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   // Callers should have waited for open() before using us
   ceph_assert(!journaler.is_readonly());
@@ -388,7 +388,7 @@ bool PurgeQueue::_consume()
       // via the same Journaler instance, we never need to reread_head
       if (!journaler.have_waiter()) {
         journaler.wait_for_readable(new FunctionContext([this](int r) {
-          Mutex::Locker l(lock);
+          std::lock_guard l(lock);
           if (r == 0) {
             _consume();
           } else if (r != -EAGAIN) {
@@ -508,7 +508,7 @@ void PurgeQueue::_execute_item(
 
   gather.set_finisher(new C_OnFinisher(
       new FunctionContext([this, expire_to](int r){
-        Mutex::Locker l(lock);
+        std::lock_guard l(lock);
         _execute_item_complete(expire_to);
 
         _consume();
@@ -578,7 +578,7 @@ void PurgeQueue::_execute_item_complete(
 
 void PurgeQueue::update_op_limit(const MDSMap &mds_map)
 {
-  Mutex::Locker l(lock);
+  std::lock_guard l(lock);
 
   uint64_t pg_count = 0;
   objecter->with_osdmap([&](const OSDMap& o) {
@@ -615,14 +615,14 @@ void PurgeQueue::handle_conf_change(const ConfigProxy& conf,
       || changed.count("mds_max_purge_ops_per_pg")) {
     update_op_limit(mds_map);
   }
   else if (changed.count("mds_max_purge_files")) {
-    Mutex::Locker l(lock);
+    std::lock_guard l(lock);
     if (in_flight.empty()) {
       // We might have gone from zero to a finite limit, so
       // might need to kick off consume.
       dout(4) << "maybe start work again (max_purge_files=" << conf->mds_max_purge_files << dendl;
       finisher.queue(new FunctionContext([this](int r){
-            Mutex::Locker l(lock);
+            std::lock_guard l(lock);
             _consume();
           }));
     }
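
The conversion above is mechanical: std::lock_guard only requires the lock type to satisfy BasicLockable (lock()/unlock()), and with C++17 class template argument deduction the guard can be declared without spelling out the template argument, so each call site keeps the same shape as the old Mutex::Locker RAII helper. The sketch below is illustrative only; the Mutex class, submit_mutex, and the set_safe_pos_* functions are hypothetical stand-ins, not the actual Ceph types.

// Minimal sketch, assuming only that the lock type models BasicLockable.
// "Mutex", "submit_mutex" and set_safe_pos_* are stand-ins for illustration.
#include <cstdint>
#include <mutex>

class Mutex {
 public:
  void lock()   { m.lock(); }    // BasicLockable: acquire
  void unlock() { m.unlock(); }  // BasicLockable: release

  // Hand-rolled RAII guard in the old style.
  class Locker {
   public:
    explicit Locker(Mutex &mu) : mutex(mu) { mutex.lock(); }
    ~Locker() { mutex.unlock(); }
   private:
    Mutex &mutex;
  };

 private:
  std::mutex m;
};

Mutex submit_mutex;
uint64_t safe_pos = 0;

void set_safe_pos_old(uint64_t pos) {
  Mutex::Locker l(submit_mutex);   // before: custom RAII guard
  safe_pos = pos;
}

void set_safe_pos_new(uint64_t pos) {
  // after: C++17 CTAD deduces std::lock_guard<Mutex>, so the call site
  // reads almost identically to the Mutex::Locker line it replaces.
  std::lock_guard l(submit_mutex);
  safe_pos = pos;
}

int main() {
  set_safe_pos_old(1);
  set_safe_pos_new(2);
  return 0;
}

Because both guards lock in the constructor and unlock in the destructor, only the declaration line changes in each hunk; surrounding calls such as submit_cond.Signal() are unaffected.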