// Wake the journal submit thread so it re-checks its work queue.
void MDLog::kick_submitter()
{
// Diff hunk: legacy Ceph Mutex::Locker replaced with std::lock_guard
// (relies on C++17 class template argument deduction).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
// Signal under the lock so the waiter cannot miss the wakeup.
submit_cond.Signal();
}
if (mds->is_standby_replay()) {
dout(1) << "Journal " << jp.front << " is being rewritten, "
<< "cannot replay in standby until an active MDS completes rewrite" << dendl;
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
return;
}
// Assign to ::journaler so that we can be aborted by ::shutdown while
// waiting for journaler recovery
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
journaler = front_journal;
}
dout(0) << "Journal " << jp.front << " is in unknown format " << front_journal->get_stream_format()
<< ", does this MDS daemon require upgrade?" << dendl;
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
journaler = NULL;
delete front_journal;
dout(4) << "Recovered journal " << jp.front << " in format " << front_journal->get_stream_format() << dendl;
journaler->set_write_error_handler(new C_MDL_WriteError(this));
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
return;
}
int erase_result = erase_waiter.wait();
ceph_assert(erase_result == 0);
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
delete new_journal;
return;
/* Reset the Journaler object to its default state */
dout(1) << "Journal rewrite complete, continuing with normal startup" << dendl;
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
delete new_journal;
return;
/* Trigger completion */
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
return;
}
num_events++;
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
return;
}
dout(10) << "_replay_thread kicking waiters" << dendl;
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
return;
}
// Record the journal offset up to which entries are known durable.
// @param pos  new safe position; must be monotonically non-decreasing.
void set_safe_pos(uint64_t pos)
{
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
// Safe position may only move forward; a regression indicates a bug.
ceph_assert(pos >= safe_pos);
safe_pos = pos;
}
void _journal_segment_subtree_map(MDSInternalContextBase *onsync);
public:
// Public locked wrapper: takes submit_mutex, then delegates to the
// unlocked _start_new_segment() implementation.
void start_new_segment() {
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
_start_new_segment();
}
// Public locked wrapper: takes submit_mutex, then delegates to the
// unlocked _prepare_new_segment() implementation.
void prepare_new_segment() {
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
_prepare_new_segment();
}
void journal_segment_subtree_map(MDSInternalContextBase *onsync=NULL) {
public:
void _start_entry(LogEvent *e);
// Public locked wrapper around _start_entry(): begins tracking a new
// log event under submit_mutex.
void start_entry(LogEvent *e) {
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
_start_entry(e);
}
void cancel_entry(LogEvent *e);
void _submit_entry(LogEvent *e, MDSLogContextBase *c);
// Queue a log event for submission and wake the submit thread.
// @param e  event to submit (ownership per _submit_entry's contract).
// @param c  optional completion fired once the event is journaled.
void submit_entry(LogEvent *e, MDSLogContextBase *c = 0) {
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
_submit_entry(e, c);
// Signal under the lock so the submitter cannot miss the wakeup.
submit_cond.Signal();
}
void start_submit_entry(LogEvent *e, MDSLogContextBase *c = 0) {
- Mutex::Locker l(submit_mutex);
+ std::lock_guard l(submit_mutex);
_start_entry(e);
_submit_entry(e, c);
submit_cond.Signal();
dout(10) << "MDSIOContextBase::complete: " << typeid(*this).name() << dendl;
ceph_assert(mds != NULL);
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
dout(4) << "MDSIOContextBase::complete: dropping for stopping "
}
MDSDaemon::~MDSDaemon() {
- Mutex::Locker lock(mds_lock);
+ std::lock_guard lock(mds_lock);
delete mds_rank;
mds_rank = NULL;
f->dump_string("state", ceph_mds_state_name(mdsmap->get_state_gid(mds_gid_t(
monc->get_global_id()))));
if (mds_rank) {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mds_rank->dump_status(f);
}
ceph_assert(signum == SIGINT || signum == SIGTERM);
derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
if (stopping) {
return;
}
bool MDSDaemon::ms_dispatch2(const Message::ref &m)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
if (stopping) {
return false;
}
if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
return false;
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
if (stopping) {
return false;
}
if (con->get_peer_type() != CEPH_ENTITY_TYPE_CLIENT)
return;
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
if (stopping) {
return;
}
void MDSDaemon::ms_handle_accept(Connection *con)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
if (stopping) {
return;
}
dout(20) << __func__ << dendl;
Context *ctx = new C_OnFinisher(new FunctionContext([this](int _) {
- Mutex::Locker locker(mds->mds_lock);
+ std::lock_guard locker(mds->mds_lock);
trim_expired_segments();
}), mds->finisher);
ctx->complete(0);
dout(20) << __func__ << dendl;
Context *ctx = new FunctionContext([this](int r) {
- Mutex::Locker locker(mds->mds_lock);
+ std::lock_guard locker(mds->mds_lock);
handle_write_head(r);
});
// Flush the journal header so that readers will start from after
void finish(int r) override {
Context *ctx = nullptr;
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
std::swap(on_finish, ctx);
}
if (ctx != nullptr) {
// Convenience entry point for callers that do NOT already hold mds_lock:
// acquires the lock, then reports this rank as damaged via damaged().
void MDSRank::damaged_unlocked()
{
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
damaged();
}
void *MDSRank::ProgressThread::entry()
{
- Mutex::Locker l(mds->mds_lock);
+ std::lock_guard l(mds->mds_lock);
while (true) {
while (!mds->stopping &&
mds->finished_queue.empty() &&
cond.wait();
}
} else if (command == "session ls") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
heartbeat_reset();
}
command_export_dir(f, path, (mds_rank_t)rank);
} else if (command == "dump cache") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
string path;
int r;
if(!cmd_getval(g_ceph_context, cmdmap, "path", path)) {
f->reset();
}
} else if (command == "cache status") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->cache_status(f);
} else if (command == "cache drop") {
int64_t timeout;
} else if (command == "dump tree") {
command_dump_tree(cmdmap, ss, f);
} else if (command == "dump loads") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
int r = balancer->dump_loads(f);
if (r != 0) {
ss << "Failed to dump loads: " << cpp_strerror(r);
f->reset();
}
} else if (command == "dump snaps") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
string server;
cmd_getval(g_ceph_context, cmdmap, "server", server);
if (server == "--server") {
}
}
} else if (command == "force_readonly") {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->force_readonly();
} else if (command == "dirfrag split") {
command_dirfrag_split(cmdmap, ss);
}
C_SaferCond scond;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->enqueue_scrub(path, "", force, recursive, repair, f, &scond);
}
scond.wait();
{
C_SaferCond scond;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond);
}
scond.wait();
{
C_SaferCond scond;
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->flush_dentry(path, &scond);
}
int r = scond.wait();
C_SaferCond cond;
std::stringstream ss;
{
- Mutex::Locker locker(mds_lock);
+ std::lock_guard locker(mds_lock);
C_Flush_Journal *flush_journal = new C_Flush_Journal(mdcache, mdlog, this, &ss, &cond);
flush_journal->send();
}
void MDSRank::command_get_subtrees(Formatter *f)
{
ceph_assert(f != NULL);
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
std::list<CDir*> subtrees;
mdcache->list_subtrees(subtrees);
std::string_view path,
mds_rank_t target)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
filepath fp(path);
if (target == whoami || !mdsmap->is_up(target) || !mdsmap->is_in(target)) {
cmd_getval(g_ceph_context, cmdmap, "root", root);
if (!cmd_getval(g_ceph_context, cmdmap, "depth", depth))
depth = -1;
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
CInode *in = mdcache->cache_traverse(filepath(root.c_str()));
if (!in) {
ss << "root inode is not in cache";
cmdmap_t cmdmap,
std::ostream &ss)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
int64_t by = 0;
if (!cmd_getval(g_ceph_context, cmdmap, "bits", by)) {
ss << "missing bits argument";
cmdmap_t cmdmap,
std::ostream &ss)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
std::string path;
bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
if (!got) {
std::ostream &ss,
Formatter *f)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
std::string path;
bool got = cmd_getval(g_ceph_context, cmdmap, "path", path);
if (!got) {
// Admin-socket handler: dump the open-files table to the formatter.
// @param f  output formatter (caller owns; flushed by the caller).
void MDSRank::command_openfiles_ls(Formatter *f)
{
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
mdcache->dump_openfiles(f);
}
void MDSRank::command_dump_inode(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss)
{
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
int64_t number;
bool got = cmd_getval(g_ceph_context, cmdmap, "number", number);
if (!got) {
objecter->wait_for_latest_osdmap(
new C_OnFinisher(
new FunctionContext([this, fn](int r) {
- Mutex::Locker l(mds_lock);
+ std::lock_guard l(mds_lock);
auto epoch = objecter->with_osdmap([](const OSDMap &o){
return o.get_epoch();
});
// Admin-socket handler: kick off an asynchronous cache-drop operation.
// @param timeout    seconds budget for the drop — presumably enforced by
//                   C_Drop_Cache; confirm against its implementation.
// @param f          formatter for result output.
// @param on_finish  completion invoked when the drop finishes.
void MDSRank::command_cache_drop(uint64_t timeout, Formatter *f, Context *on_finish) {
dout(20) << __func__ << dendl;
// Diff hunk: Mutex::Locker -> std::lock_guard (C++17 CTAD).
- Mutex::Locker locker(mds_lock);
+ std::lock_guard locker(mds_lock);
// C_Drop_Cache manages its own lifetime; send() starts the async work.
C_Drop_Cache *request = new C_Drop_Cache(server, mdcache, mdlog, this,
timeout, f, on_finish);
request->send();
}
{
f->open_array_section("events");
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
for (auto& i : events) {
f->dump_object("event", i);
}
void PurgeQueue::init()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(logger != nullptr);
void PurgeQueue::activate()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (journaler.get_read_pos() == journaler.get_write_pos())
return;
if (in_flight.empty()) {
dout(4) << "start work (by drain)" << dendl;
finisher.queue(new FunctionContext([this](int r) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
_consume();
}));
}
void PurgeQueue::shutdown()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
journaler.shutdown();
timer.shutdown();
{
dout(4) << "opening" << dendl;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (completion)
waiting_for_recovery.push_back(completion);
"creating it." << dendl;
create(NULL);
} else if (r == 0) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(4) << "open complete" << dendl;
// Journaler only guarantees entries before head write_pos have been
void PurgeQueue::wait_for_recovery(Context* c)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (recovered)
c->complete(0);
else
!journaler.get_error() &&
journaler.get_read_pos() < journaler.get_write_pos()) {
journaler.wait_for_readable(new FunctionContext([this](int r) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
_recover();
}));
return;
void PurgeQueue::create(Context *fin)
{
dout(4) << "creating" << dendl;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (fin)
waiting_for_recovery.push_back(fin);
journaler.set_writeable();
journaler.create(&layout, JOURNAL_FORMAT_RESILIENT);
journaler.write_head(new FunctionContext([this](int r) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
recovered = true;
finish_contexts(g_ceph_context, waiting_for_recovery);
}));
void PurgeQueue::push(const PurgeItem &pi, Context *completion)
{
dout(4) << "pushing inode " << pi.ino << dendl;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
// Callers should have waited for open() before using us
ceph_assert(!journaler.is_readonly());
// via the same Journaler instance, we never need to reread_head
if (!journaler.have_waiter()) {
journaler.wait_for_readable(new FunctionContext([this](int r) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (r == 0) {
_consume();
} else if (r != -EAGAIN) {
gather.set_finisher(new C_OnFinisher(
new FunctionContext([this, expire_to](int r){
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
_execute_item_complete(expire_to);
_consume();
void PurgeQueue::update_op_limit(const MDSMap &mds_map)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
uint64_t pg_count = 0;
objecter->with_osdmap([&](const OSDMap& o) {
|| changed.count("mds_max_purge_ops_per_pg")) {
update_op_limit(mds_map);
} else if (changed.count("mds_max_purge_files")) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (in_flight.empty()) {
// We might have gone from zero to a finite limit, so
// might need to kick off consume.
dout(4) << "maybe start work again (max_purge_files="
<< conf->mds_max_purge_files << dendl;
finisher.queue(new FunctionContext([this](int r){
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
_consume();
}));
}