std::string ceph_version;
for (const auto &i : dmc) {
- Mutex::Locker l(i.second->lock);
+ std::lock_guard l(i.second->lock);
const auto &key = i.first;
const std::string &str_type = key.first;
const std::string &svc_name = key.second;
PyObject *ActivePyModules::get_server_python(const std::string &hostname)
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
dout(10) << " (" << hostname << ")" << dendl;
PyObject *ActivePyModules::list_servers_python()
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
dout(10) << " >" << dendl;
Py_RETURN_NONE;
}
- Mutex::Locker l(metadata->lock);
+ std::lock_guard l(metadata->lock);
PyFormatter f;
f.dump_string("hostname", metadata->hostname);
for (const auto &i : metadata->metadata) {
Py_RETURN_NONE;
}
- Mutex::Locker l(metadata->lock);
+ std::lock_guard l(metadata->lock);
PyFormatter f;
for (const auto &i : metadata->service_status) {
f.dump_string(i.first.c_str(), i.second);
PyObject *ActivePyModules::get_python(const std::string &what)
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
if (what == "fs_map") {
PyFormatter f;
auto dmc = daemon_state.get_by_service("osd");
for (const auto &i : dmc) {
- Mutex::Locker l(i.second->lock);
+ std::lock_guard l(i.second->lock);
f.open_object_section(i.first.second.c_str());
f.dump_string("hostname", i.second->hostname);
for (const auto &j : i.second->metadata) {
int ActivePyModules::start_one(PyModuleRef py_module)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(modules.count(py_module->get_name()) == 0);
void ActivePyModules::shutdown()
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
// Signal modules to drop out of serve() and/or tear down resources
for (auto &i : modules) {
void ActivePyModules::notify_all(const std::string &notify_type,
const std::string &notify_id)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(10) << __func__ << ": notify_all " << notify_type << dendl;
for (auto& i : modules) {
void ActivePyModules::notify_all(const LogEntry &log_entry)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(10) << __func__ << ": notify_all (clog)" << dendl;
for (auto& i : modules) {
const std::string &key, std::string *val) const
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
const std::string global_key = PyModule::config_prefix
const std::string &key, std::string *val) const
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
const std::string global_key = PyModule::config_prefix
dout(4) << __func__ << " key: " << global_key << dendl;
- Mutex::Locker lock(module_config.lock);
+ std::lock_guard lock(module_config.lock);
auto i = module_config.config.find(global_key);
if (i != module_config.config.end()) {
const std::string &prefix) const
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
const std::string base_prefix = PyModule::config_prefix
PyFormatter f;
- Mutex::Locker lock(module_config.lock);
+ std::lock_guard lock(module_config.lock);
for (auto p = store_cache.lower_bound(global_prefix);
p != store_cache.end() && p->first.find(global_prefix) == 0;
Command set_cmd;
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
if (val) {
std::map<std::string, std::string> ActivePyModules::get_services() const
{
std::map<std::string, std::string> result;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
for (const auto& i : modules) {
const auto &module = i.second.get();
std::string svc_str = module->get_uri();
const std::string &path) const
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
PyFormatter f;
auto metadata = daemon_state.get(DaemonKey(svc_name, svc_id));
if (metadata) {
- Mutex::Locker l2(metadata->lock);
+ std::lock_guard l2(metadata->lock);
if (metadata->perf_counters.instances.count(path)) {
auto counter_instance = metadata->perf_counters.instances.at(path);
auto counter_type = metadata->perf_counters.types.at(path);
const std::string &svc_id)
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
DaemonStateCollection daemons;
daemon_name << key.first << "." << key.second;
f.open_object_section(daemon_name.str().c_str());
- Mutex::Locker l(state->lock);
+ std::lock_guard l(state->lock);
for (auto ctr_inst_iter : state->perf_counters.instances) {
const auto &counter_name = ctr_inst_iter.first;
f.open_object_section(counter_name.c_str());
PyObject *ActivePyModules::get_context()
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
// Construct a capsule containing ceph context.
PyObject *ActivePyModules::get_osdmap()
{
PyThreadState *tstate = PyEval_SaveThread();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
PyEval_RestoreThread(tstate);
OSDMap *newmap = new OSDMap;
void ActivePyModules::get_health_checks(health_check_map_t *checks)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
for (auto& p : modules) {
p.second->get_health_checks(checks);
}
void ActivePyModules::set_uri(const std::string& module_name,
const std::string &uri)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(4) << " module " << module_name << " set URI '" << uri << "'" << dendl;
void ActivePyModules::cluster_log(const std::string &channel, clog_type prio,
const std::string &message)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (channel == "audit") {
audit_clog->do_log(prio, message);
void ClusterState::set_objecter(Objecter *objecter_)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
objecter = objecter_;
}
void ClusterState::set_fsmap(FSMap const &new_fsmap)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
fsmap = new_fsmap;
}
void ClusterState::set_mgr_map(MgrMap const &new_mgrmap)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
mgr_map = new_mgrmap;
}
void ClusterState::set_service_map(ServiceMap const &new_service_map)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
servicemap = new_service_map;
}
void ClusterState::ingest_pgstats(MPGStats *stats)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
const int from = stats->get_orig_source().num();
void ClusterState::notify_osdmap(const OSDMap &osd_map)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
pending_inc.stamp = ceph_clock_now();
pending_inc.version = pg_map.version + 1; // to make apply_incremental happy
void notify_osdmap(const OSDMap &osd_map);
bool have_fsmap() const {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return fsmap.get_epoch() > 0;
}
template<typename Callback, typename...Args>
void with_servicemap(Callback&& cb, Args&&...args) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::forward<Callback>(cb)(servicemap, std::forward<Args>(args)...);
}
template<typename Callback, typename...Args>
void with_fsmap(Callback&& cb, Args&&...args) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::forward<Callback>(cb)(fsmap, std::forward<Args>(args)...);
}
template<typename Callback, typename...Args>
void with_mgrmap(Callback&& cb, Args&&...args) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::forward<Callback>(cb)(mgr_map, std::forward<Args>(args)...);
}
auto with_pgmap(Callback&& cb, Args&&...args) const ->
decltype(cb(pg_map, std::forward<Args>(args)...))
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return std::forward<Callback>(cb)(pg_map, std::forward<Args>(args)...);
}
auto with_mutable_pgmap(Callback&& cb, Args&&...args) ->
decltype(cb(pg_map, std::forward<Args>(args)...))
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return std::forward<Callback>(cb)(pg_map, std::forward<Args>(args)...);
}
template<typename... Args>
void with_monmap(Args &&... args) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(monc != nullptr);
monc->with_monmap(std::forward<Args>(args)...);
}
started_at = ceph_clock_now();
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
timer.init();
schedule_tick_locked(
}
if (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
s->osd_id = atoi(s->entity_name.get_id().c_str());
dout(10) << "registering osd." << s->osd_id << " session "
<< s << " con " << con << dendl;
if (!session) {
return false;
}
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(10) << "unregistering osd." << session->osd_id
<< " session " << session << " con " << con << dendl;
osd_cons[session->osd_id].erase(con);
// Fast path: we don't need to take lock because pgmap_ready
// is already set
} else {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (reported_osds.find(osd_id) == reported_osds.end()) {
dout(4) << "initial report from osd " << osd_id << dendl;
void DaemonServer::schedule_tick(double delay_sec)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
schedule_tick_locked(delay_sec);
}
// Send a fresh MMgrConfigure to all clients, so that they can follow
// the new policy for transmitting stats
finisher.queue(new FunctionContext([this](int r) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
for (auto &c : daemon_connections) {
if (c->peer_is_osd()) {
_send_configure(c);
msgr->wait();
dout(10) << "done" << dendl;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
shutting_down = true;
timer.shutdown();
}
bool DaemonServer::handle_open(MMgrOpen *m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
DaemonKey key = key_from_service(m->service_name,
m->get_connection()->get_peer_type(),
}
if (daemon) {
dout(20) << "updating existing DaemonState for " << m->daemon_name << dendl;
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
daemon->perf_counters.clear();
if (m->service_daemon) {
bool DaemonServer::handle_close(MMgrClose *m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
DaemonKey key = key_from_service(m->service_name,
m->get_connection()->get_peer_type(),
DaemonStatePtr daemon = daemon_state.get(key);
daemon_state.rm(key);
{
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
if (daemon->service_daemon) {
pending_service_map.rm_daemon(m->service_name, m->daemon_name);
pending_service_map_dirty = pending_service_map.epoch;
}
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
// kill session
auto priv = m->get_connection()->get_priv();
auto session = static_cast<MgrSession*>(priv.get());
// Update the DaemonState
ceph_assert(daemon != nullptr);
{
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
auto &daemon_counters = daemon->perf_counters;
daemon_counters.update(m);
bool DaemonServer::handle_command(MCommand *m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::shared_ptr<CommandContext> cmdctx = std::make_shared<CommandContext>(m);
try {
return _handle_command(m, cmdctx);
DaemonKey key(p.first, q.first);
ceph_assert(daemon_state.exists(key));
auto daemon = daemon_state.get(key);
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
f->dump_stream("status_stamp") << daemon->service_status_stamp;
f->dump_stream("last_beacon") << daemon->last_service_beacon;
f->open_object_section("status");
}
}
} else if (daemon->config_defaults_bl.length() > 0) {
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
TextTable tbl;
if (f) {
f->open_array_section("config");
continue;
}
auto daemon = daemon_state.get(key);
- Mutex::Locker l(daemon->lock);
+ std::lock_guard l(daemon->lock);
if (daemon->last_service_beacon == utime_t()) {
// we must have just restarted; assume they are alive now.
daemon->last_service_beacon = ceph_clock_now();
for (auto service : {"osd", "mon"} ) {
auto daemons = daemon_state.get_by_service(service);
for (const auto& [key,state] : daemons) {
- Mutex::Locker l{state->lock};
+ std::lock_guard l{state->lock};
for (const auto& metric : state->daemon_health_metrics) {
auto acc = accumulated.find(metric.get_type());
if (acc == accumulated.end()) {
void DaemonServer::got_service_map()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
cluster_state.with_servicemap([&](const ServiceMap& service_map) {
if (pending_service_map.epoch == 0) {
void DaemonServer::got_mgr_map()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
set<std::string> have;
cluster_state.with_mgrmap([&](const MgrMap& mgrmap) {
auto md_update = [&] (DaemonKey key) {
std::set<std::string> *ls) {
auto m = get_by_server(server);
for (auto& i : m) {
- Mutex::Locker l(i.second->lock);
+ std::lock_guard l(i.second->lock);
for (auto& j : i.second->devices) {
ls->insert(j.first);
}
DaemonStatePtr state;
if (daemon_state.exists(key)) {
state = daemon_state.get(key);
- Mutex::Locker l(state->lock);
+ std::lock_guard l(state->lock);
if (key.first == "mds" || key.first == "mgr") {
daemon_meta.erase("name");
} else if (key.first == "osd") {
void Mgr::background_init(Context *completion)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(!initializing);
ceph_assert(!initialized);
initializing = true;
void Mgr::init()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(initializing);
ceph_assert(!initialized);
{
finisher.queue(new FunctionContext([&](int) {
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
monc->sub_unwant("log-info");
monc->sub_unwant("mgrdigest");
monc->sub_unwant("fsmap");
if (daemon_state.exists(k)) {
auto metadata = daemon_state.get(k);
- Mutex::Locker l(metadata->lock);
+ std::lock_guard l(metadata->lock);
auto addr_iter = metadata->metadata.find("front_addr");
if (addr_iter != metadata->metadata.end()) {
const std::string &metadata_addr = addr_iter->second;
bool Mgr::ms_dispatch(Message *m)
{
dout(4) << *m << dendl;
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
switch (m->get_type()) {
case MSG_MGR_DIGEST:
bool update = false;
if (daemon_state.exists(k)) {
auto metadata = daemon_state.get(k);
- Mutex::Locker l(metadata->lock);
+ std::lock_guard l(metadata->lock);
if (metadata->metadata.empty() ||
metadata->metadata.count("addr") == 0) {
update = true;
bool Mgr::got_mgr_map(const MgrMap& m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(10) << m << dendl;
set<string> old_modules;
std::map<std::string, std::string> Mgr::get_services() const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return py_module_registry->get_services();
}
void MgrClient::init()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(msgr != nullptr);
void MgrClient::shutdown()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ldout(cct, 10) << dendl;
if (connect_retry_callback) {
bool MgrClient::ms_dispatch(Message *m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
switch(m->get_type()) {
case MSG_MGR_MAP:
bool MgrClient::ms_handle_reset(Connection *con)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (session && con == session->con) {
ldout(cct, 4) << __func__ << " con " << con << dendl;
reconnect();
void MgrClient::send_pgstats()
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
_send_pgstats();
}
bufferlist *outbl, string *outs,
Context *onfinish)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ldout(cct, 20) << "cmd: " << cmd << dendl;
const std::string& name,
const std::map<std::string,std::string>& metadata)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (service == "osd" ||
service == "mds" ||
service == "client" ||
int MgrClient::service_daemon_update_status(
std::map<std::string,std::string>&& status)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ldout(cct,10) << status << dendl;
daemon_status = std::move(status);
daemon_dirty_status = true;
void MgrClient::update_daemon_health(std::vector<DaemonHealthMetric>&& metrics)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
daemon_health_metrics = std::move(metrics);
}
std::function<void(OSDPerfMetricReport *)> cb_get)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
set_perf_queries_cb = cb_set;
get_perf_report_cb = cb_get;
}
void send_pgstats();
void set_pgstats_cb(std::function<MPGStats*()>&& cb_)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
pgstats_cb = std::move(cb_);
}
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
// Initialize Messenger
client_messenger->add_dispatcher_tail(this);
void MgrStandby::handle_signal(int signum)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(signum == SIGINT || signum == SIGTERM);
derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
shutdown();
[this](int r){
// Advertise our active-ness ASAP instead of waiting for
// next tick.
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
send_beacon();
}));
dout(1) << "I am now activating" << dendl;
bool MgrStandby::ms_dispatch(Message *m)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
dout(4) << state_str() << " " << *m << dendl;
if (m->get_type() == MSG_MGR_MAP) {
}
std::list<OSDPerfMetricQuery> OSDPerfMetricCollector::get_queries() {
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
std::list<OSDPerfMetricQuery> query_list;
for (auto &it : queries) {
bool notify = false;
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
query_id = next_query_id++;
auto it = queries.find(query);
bool notify = false;
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
for (auto it = queries.begin() ; it != queries.end(); it++) {
auto &ids = it->second;
bool notify;
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
notify = !queries.empty();
queries.clear();
const std::string global_key = PyModule::config_prefix
+ module_name + "/" + key;
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (val) {
config[global_key] = *val;
bool PyModule::is_option(const std::string &option_name)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return options.count(option_name) > 0;
}
*/
void get_commands(std::vector<ModuleCommand> *out) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(out != nullptr);
out->insert(out->end(), commands.begin(), commands.end());
}
*/
void fail(const std::string &reason)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
failed = true;
error_string = reason;
}
bool is_enabled() const {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return enabled || always_on;
}
- bool is_failed() const { Mutex::Locker l(lock) ; return failed; }
- bool is_loaded() const { Mutex::Locker l(lock) ; return loaded; }
- bool is_always_on() const { Mutex::Locker l(lock) ; return always_on; }
+ bool is_failed() const { std::lock_guard l(lock); return failed; }
+ bool is_loaded() const { std::lock_guard l(lock); return loaded; }
+ bool is_always_on() const { std::lock_guard l(lock); return always_on; }
const std::string &get_name() const {
- Mutex::Locker l(lock) ; return module_name;
+ std::lock_guard l(lock); return module_name;
}
const std::string &get_error_string() const {
- Mutex::Locker l(lock) ; return error_string;
+ std::lock_guard l(lock); return error_string;
}
bool get_can_run() const {
- Mutex::Locker l(lock) ; return can_run;
+ std::lock_guard l(lock); return can_run;
}
};
void PyModuleRegistry::init()
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
// Set up global python interpreter
#if PY_MAJOR_VERSION >= 3
bool PyModuleRegistry::handle_mgr_map(const MgrMap &mgr_map_)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
if (mgr_map.epoch == 0) {
mgr_map = mgr_map_;
void PyModuleRegistry::standby_start(MonClient &mc)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
ceph_assert(active_modules == nullptr);
ceph_assert(standby_modules == nullptr);
Objecter &objecter_, Client &client_, Finisher &f,
DaemonServer &server)
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
dout(4) << "Starting modules in active mode" << dendl;
void PyModuleRegistry::active_shutdown()
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
if (active_modules != nullptr) {
active_modules->shutdown();
void PyModuleRegistry::shutdown()
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
if (standby_modules != nullptr) {
standby_modules->shutdown();
std::vector<ModuleCommand> PyModuleRegistry::get_py_commands() const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::vector<ModuleCommand> result;
for (const auto& i : modules) {
void PyModuleRegistry::get_health_checks(health_check_map_t *checks)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
// Only the active mgr reports module issues
if (active_modules) {
void PyModuleRegistry::handle_config(const std::string &k, const std::string &v)
{
- Mutex::Locker l(module_config.lock);
+ std::lock_guard l(module_config.lock);
if (!v.empty()) {
dout(4) << "Loaded module_config entry " << k << ":" << v << dendl;
*/
std::list<PyModuleRef> get_modules() const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::list<PyModuleRef> modules_out;
for (const auto &i : modules) {
modules_out.push_back(i.second);
*/
PyModuleRef get_module(const std::string &module_name)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return modules.at(module_name);
}
// FIXME: completely identical to ActivePyModules
void StandbyPyModules::shutdown()
{
- Mutex::Locker locker(lock);
+ std::lock_guard locker(lock);
// Signal modules to drop out of serve() and/or tear down resources
for (auto &i : modules) {
int StandbyPyModules::start_one(PyModuleRef py_module)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
const std::string &module_name = py_module->get_name();
ceph_assert(modules.count(module_name) == 0);
void set_mgr_map(const MgrMap &mgr_map_)
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
mgr_map = mgr_map_;
}
template<typename Callback, typename...Args>
void with_mgr_map(Callback&& cb, Args&&...args) const
{
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
std::forward<Callback>(cb)(mgr_map, std::forward<Args>(args)...);
}
template<typename Callback, typename...Args>
auto with_config(Callback&& cb, Args&&... args) const ->
decltype(cb(module_config, std::forward<Args>(args)...)) {
- Mutex::Locker l(lock);
+ std::lock_guard l(lock);
return std::forward<Callback>(cb)(module_config, std::forward<Args>(args)...);
}