From: Kefu Chai
Date: Thu, 24 Dec 2020 07:27:41 +0000 (+0800)
Subject: mgr/ActivePyModules.cc: use wrappers for acquiring/releasing GIL
X-Git-Tag: v14.2.22~27^2~9^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=2467896cf288063f4ee55495f54c2e58a314f6c1;p=ceph.git

mgr/ActivePyModules.cc: use wrappers for acquiring/releasing GIL

This change is a follow-up of 0601b31a53a455f0b67c981460d198cb3a97f3de, for a
couple of reasons:

- document the guideline for locking when working with the Python GIL
- add primitives to extract the patterns for acquiring/releasing the GIL,
  so they can be reused

Fixes: https://tracker.ceph.com/issues/39264
Signed-off-by: Kefu Chai
(cherry picked from commit 9c652fb305a3e3d1b4a752bac9bd8a85d15c7de9)

Conflicts:
	src/mgr/ActivePyModules.cc
	src/mgr/DaemonState.cc
	- convert std::pair to DaemonKey::type-name in a few places
	- removed "mds_metadata", which doesn't exist in latest Nautilus
---

diff --git a/src/mgr/ActivePyModules.cc b/src/mgr/ActivePyModules.cc
index 2aaa880c30ced..bccdb06175bf7 100644
--- a/src/mgr/ActivePyModules.cc
+++ b/src/mgr/ActivePyModules.cc
@@ -56,6 +56,66 @@ ActivePyModules::ActivePyModules(PyModuleConfig &module_config_,
 
 ActivePyModules::~ActivePyModules() = default;
 
+namespace {
+  // because the Python runtime could relinquish the GIL when performing GC
+  // and re-acquire it afterwards, we should enforce the following locking policy:
+  // 1. do not acquire locks when holding the GIL; use a without_gil or
+  //    without_gil_t to guard the code which acquires non-GIL locks.
+  // 2. always hold the GIL when calling Python functions, for example, when
+  //    constructing a PyFormatter instance.
+  //
+  // a wrapper that provides a convenient RAII-style mechanism for acquiring
+  // and releasing the GIL, like the Py_BEGIN_ALLOW_THREADS and
+  // Py_END_ALLOW_THREADS macros.
+  struct without_gil_t {
+    without_gil_t()
+    {
+      release_gil();
+    }
+    ~without_gil_t()
+    {
+      if (save) {
+        acquire_gil();
+      }
+    }
+  private:
+    void release_gil() {
+      save = PyEval_SaveThread();
+    }
+    void acquire_gil() {
+      assert(save);
+      PyEval_RestoreThread(save);
+      save = nullptr;
+    }
+    PyThreadState *save = nullptr;
+    friend struct with_gil_t;
+  };
+  struct with_gil_t {
+    with_gil_t(without_gil_t& allow_threads)
+      : allow_threads{allow_threads}
+    {
+      allow_threads.acquire_gil();
+    }
+    ~with_gil_t() {
+      allow_threads.release_gil();
+    }
+  private:
+    without_gil_t& allow_threads;
+  };
+  // invoke func with GIL acquired
+  template <typename Func>
+  auto with_gil(without_gil_t& no_gil, Func&& func)
+  {
+    with_gil_t gil{no_gil};
+    return std::invoke(std::forward<Func>(func));
+  }
+  template <typename Func>
+  auto without_gil(Func&& func) {
+    without_gil_t no_gil;
+    return std::invoke(std::forward<Func>(func));
+  }
+}
+
 void ActivePyModules::dump_server(const std::string &hostname,
     const DaemonStateCollection &dmc,
     Formatter *f)
@@ -65,15 +125,16 @@ void ActivePyModules::dump_server(const std::string &hostname,
   std::string ceph_version;
 
   for (const auto &[key, state] : dmc) {
-    std::lock_guard l(state->lock);
-    // TODO: pick the highest version, and make sure that
-    // somewhere else (during health reporting?) we are
-    // indicating to the user if we see mixed versions
-    auto ver_iter = state->metadata.find("ceph_version");
-    if (ver_iter != state->metadata.end()) {
-      ceph_version = state->metadata.at("ceph_version");
-    }
-
+    without_gil([&ceph_version, state=state] {
+      std::lock_guard l(state->lock);
+      // TODO: pick the highest version, and make sure that
+      // somewhere else (during health reporting?)
we are + // indicating to the user if we see mixed versions + auto ver_iter = state->metadata.find("ceph_version"); + if (ver_iter != state->metadata.end()) { + ceph_version = state->metadata.at("ceph_version"); + } + }); f->open_object_section("service"); f->dump_string("type", key.type); f->dump_string("id", key.name); @@ -84,17 +145,13 @@ void ActivePyModules::dump_server(const std::string &hostname, f->dump_string("ceph_version", ceph_version); } - - PyObject *ActivePyModules::get_server_python(const std::string &hostname) { - PyThreadState *tstate = PyEval_SaveThread(); - std::lock_guard l(lock); - PyEval_RestoreThread(tstate); - dout(10) << " (" << hostname << ")" << dendl; - - auto dmc = daemon_state.get_by_server(hostname); - + const auto dmc = without_gil([&]{ + std::lock_guard l(lock); + dout(10) << " (" << hostname << ")" << dendl; + return daemon_state.get_by_server(hostname); + }); PyFormatter f; dump_server(hostname, dmc, &f); return f.get(); @@ -103,24 +160,20 @@ PyObject *ActivePyModules::get_server_python(const std::string &hostname) PyObject *ActivePyModules::list_servers_python() { - PyFormatter f(false, true); - PyThreadState *tstate = PyEval_SaveThread(); dout(10) << " >" << dendl; - daemon_state.with_daemons_by_server([this, &f, &tstate] + without_gil_t no_gil; + return daemon_state.with_daemons_by_server([this, &no_gil] (const std::map &all) { - PyEval_RestoreThread(tstate); - - for (const auto &i : all) { - const auto &hostname = i.first; - + with_gil_t with_gil{no_gil}; + PyFormatter f(false, true); + for (const auto &[hostname, daemon_state] : all) { f.open_object_section("server"); - dump_server(hostname, i.second, &f); + dump_server(hostname, daemon_state, &f); f.close_section(); } + return f.get(); }); - - return f.get(); } PyObject *ActivePyModules::get_metadata_python( @@ -132,8 +185,9 @@ PyObject *ActivePyModules::get_metadata_python( derr << "Requested missing service " << svc_type << "." << svc_id << dendl; Py_RETURN_NONE; } - - std::lock_guard l(metadata->lock); + auto l = without_gil([&] { + return std::lock_guard(lock); + }); PyFormatter f; f.dump_string("hostname", metadata->hostname); for (const auto &i : metadata->metadata) { @@ -152,8 +206,9 @@ PyObject *ActivePyModules::get_daemon_status_python( derr << "Requested missing service " << svc_type << "." << svc_id << dendl; Py_RETURN_NONE; } - - std::lock_guard l(metadata->lock); + auto l = without_gil([&] { + return std::lock_guard(lock); + }); PyFormatter f; for (const auto &i : metadata->service_status) { f.dump_string(i.first.c_str(), i.second); @@ -168,25 +223,25 @@ PyObject *ActivePyModules::get_python(const std::string &what) // Drop the GIL, as most of the following blocks will block on // a mutex -- they are all responsible for re-taking the GIL before // touching the PyFormatter instance or returning from the function. 
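As a usage sketch (not part of the patch itself), the wrappers introduced above are meant to be combined as follows; example_python(), some_lock and fetch_value() are hypothetical stand-ins for a caller, a non-GIL mutex and blocking work that must not run while the GIL is held, while PyFormatter is the real helper used throughout this file:

PyObject *example_python()                  // hypothetical caller
{
  without_gil_t no_gil;                     // GIL is released here
  std::string value;
  {
    std::lock_guard l(some_lock);           // non-GIL lock, taken with the GIL released
    value = fetch_value();                  // hypothetical blocking work
  }
  // re-acquire the GIL only for the Python-facing part
  return with_gil(no_gil, [&] {
    PyFormatter f;
    f.dump_string("value", value);
    return f.get();                         // built while holding the GIL
  });
  // ~with_gil_t drops the GIL again and ~without_gil_t re-acquires it, so the
  // caller gets control back with the GIL held, exactly as on entry.
}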
- PyThreadState *tstate = PyEval_SaveThread(); + without_gil_t no_gil; if (what == "fs_map") { - cluster_state.with_fsmap([&f, &tstate](const FSMap &fsmap) { - PyEval_RestoreThread(tstate); + return cluster_state.with_fsmap([&](const FSMap &fsmap) { + with_gil_t with_gil{no_gil}; fsmap.dump(&f); + return f.get(); }); - return f.get(); } else if (what == "osdmap_crush_map_text") { bufferlist rdata; - cluster_state.with_osdmap([&rdata, &tstate](const OSDMap &osd_map){ - PyEval_RestoreThread(tstate); + cluster_state.with_osdmap([&](const OSDMap &osd_map){ osd_map.crush->encode(rdata, CEPH_FEATURES_SUPPORTED_DEFAULT); }); std::string crush_text = rdata.to_str(); - return PyString_FromString(crush_text.c_str()); + with_gil_t with_gil{no_gil}; + return PyUnicode_FromString(crush_text.c_str()); } else if (what.substr(0, 7) == "osd_map") { - cluster_state.with_osdmap([&f, &what, &tstate](const OSDMap &osd_map){ - PyEval_RestoreThread(tstate); + return cluster_state.with_osdmap([&](const OSDMap &osd_map){ + with_gil_t with_gil{no_gil}; if (what == "osd_map") { osd_map.dump(&f); } else if (what == "osd_map_tree") { @@ -194,10 +249,9 @@ PyObject *ActivePyModules::get_python(const std::string &what) } else if (what == "osd_map_crush") { osd_map.crush->dump(&f); } + return f.get(); }); - return f.get(); } else if (what == "modified_config_options") { - PyEval_RestoreThread(tstate); auto all_daemons = daemon_state.get_all(); set names; for (auto& [key, daemon] : all_daemons) { @@ -206,6 +260,7 @@ PyObject *ActivePyModules::get_python(const std::string &what) names.insert(name); } } + with_gil_t with_gil{no_gil}; f.open_array_section("options"); for (auto& name : names) { f.dump_string("name", name); @@ -213,7 +268,7 @@ PyObject *ActivePyModules::get_python(const std::string &what) f.close_section(); return f.get(); } else if (what.substr(0, 6) == "config") { - PyEval_RestoreThread(tstate); + with_gil_t with_gil{no_gil}; if (what == "config_options") { g_conf().config_options(&f); } else if (what == "config") { @@ -221,40 +276,34 @@ PyObject *ActivePyModules::get_python(const std::string &what) } return f.get(); } else if (what == "mon_map") { - cluster_state.with_monmap( - [&f, &tstate](const MonMap &monmap) { - PyEval_RestoreThread(tstate); - monmap.dump(&f); - } - ); - return f.get(); + return cluster_state.with_monmap([&](const MonMap &monmap) { + with_gil_t with_gil{no_gil}; + monmap.dump(&f); + return f.get(); + }); } else if (what == "service_map") { - cluster_state.with_servicemap( - [&f, &tstate](const ServiceMap &service_map) { - PyEval_RestoreThread(tstate); - service_map.dump(&f); - } - ); - return f.get(); + return cluster_state.with_servicemap([&](const ServiceMap &service_map) { + with_gil_t with_gil{no_gil}; + service_map.dump(&f); + return f.get(); + }); } else if (what == "osd_metadata") { auto dmc = daemon_state.get_by_service("osd"); - PyEval_RestoreThread(tstate); - for (const auto &[key, state] : dmc) { std::lock_guard l(state->lock); - f.open_object_section(key.name.c_str()); - f.dump_string("hostname", state->hostname); - for (const auto &[name, val] : state->metadata) { - f.dump_string(name.c_str(), val); - } - f.close_section(); + with_gil(no_gil, [&f, &name=key.name, state=state] { + f.open_object_section(name.c_str()); + f.dump_string("hostname", state->hostname); + for (const auto &[name, val] : state->metadata) { + f.dump_string(name.c_str(), val); + } + f.close_section(); + }); } - return f.get(); + return with_gil(no_gil, [&] { return f.get(); }); } else if (what == 
"pg_summary") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); - + return cluster_state.with_pgmap( + [&f, &no_gil](const PGMap &pg_map) { std::map > osds; std::map > pools; std::map all; @@ -268,6 +317,7 @@ PyObject *ActivePyModules::get_python(const std::string &what) } all[state]++; } + with_gil_t with_gil{no_gil}; f.open_object_section("by_osd"); for (const auto &i : osds) { f.open_object_section(i.first.c_str()); @@ -294,136 +344,129 @@ PyObject *ActivePyModules::get_python(const std::string &what) f.open_object_section("pg_stats_sum"); pg_map.pg_sum.dump(&f); f.close_section(); + return f.get(); } ); - return f.get(); } else if (what == "pg_status") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap( + [&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.print_summary(&f, nullptr); + return f.get(); } ); - return f.get(); } else if (what == "pg_dump") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap( + [&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump(&f, false); + return f.get(); } ); - return f.get(); } else if (what == "devices") { daemon_state.with_devices2( - [&tstate, &f]() { - PyEval_RestoreThread(tstate); - f.open_array_section("devices"); + [&] { + with_gil(no_gil, [&] { f.open_array_section("devices"); }); }, - [&f] (const DeviceState& dev) { - f.dump_object("device", dev); + [&](const DeviceState &dev) { + with_gil(no_gil, [&] { f.dump_object("device", dev); }); }); - f.close_section(); - return f.get(); + return with_gil(no_gil, [&] { + f.close_section(); + return f.get(); + }); } else if (what.size() > 7 && what.substr(0, 7) == "device ") { string devid = what.substr(7); - if (!daemon_state.with_device( - devid, - [&f, &tstate] (const DeviceState& dev) { - PyEval_RestoreThread(tstate); - f.dump_object("device", dev); - })) { + if (!daemon_state.with_device(devid, + [&] (const DeviceState& dev) { + with_gil_t with_gil{no_gil}; + f.dump_object("device", dev); + })) { // device not found - PyEval_RestoreThread(tstate); } - return f.get(); + return with_gil(no_gil, [&] { return f.get(); }); } else if (what == "io_rate") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap( + [&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump_delta(&f); + return f.get(); } ); - return f.get(); } else if (what == "df") { - cluster_state.with_osdmap_and_pgmap( - [this, &f, &tstate]( + return cluster_state.with_osdmap_and_pgmap( + [&]( const OSDMap& osd_map, const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + with_gil_t with_gil{no_gil}; pg_map.dump_cluster_stats(nullptr, &f, true); pg_map.dump_pool_stats_full(osd_map, nullptr, &f, true); + return f.get(); }); - return f.get(); } else if (what == "pg_stats") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap([&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump_pg_stats(&f, false); + return f.get(); }); - return f.get(); } else if (what == "pool_stats") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap([&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump_pool_stats(&f); + return f.get(); }); - 
return f.get(); } else if (what == "pg_ready") { - PyEval_RestoreThread(tstate); + with_gil_t with_gil{no_gil}; server.dump_pg_ready(&f); return f.get(); } else if (what == "osd_stats") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap([&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump_osd_stats(&f, false); + return f.get(); }); - return f.get(); } else if (what == "osd_ping_times") { - cluster_state.with_pgmap( - [&f, &tstate](const PGMap &pg_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_pgmap([&](const PGMap &pg_map) { + with_gil_t with_gil{no_gil}; pg_map.dump_osd_ping_times(&f); + return f.get(); }); - return f.get(); } else if (what == "osd_pool_stats") { int64_t poolid = -ENOENT; - string pool_name; - cluster_state.with_osdmap_and_pgmap([&](const OSDMap& osdmap, + return cluster_state.with_osdmap_and_pgmap([&](const OSDMap& osdmap, const PGMap& pg_map) { - PyEval_RestoreThread(tstate); - f.open_array_section("pool_stats"); - for (auto &p : osdmap.get_pools()) { - poolid = p.first; - pg_map.dump_pool_stats_and_io_rate(poolid, osdmap, &f, nullptr); - } - f.close_section(); + with_gil_t with_gil{no_gil}; + f.open_array_section("pool_stats"); + for (auto &p : osdmap.get_pools()) { + poolid = p.first; + pg_map.dump_pool_stats_and_io_rate(poolid, osdmap, &f, nullptr); + } + f.close_section(); + return f.get(); }); - return f.get(); } else if (what == "health") { - cluster_state.with_health( - [&f, &tstate](const ceph::bufferlist &health_json) { - PyEval_RestoreThread(tstate); + return cluster_state.with_health([&](const ceph::bufferlist &health_json) { + with_gil_t with_gil{no_gil}; f.dump_string("json", health_json.to_str()); + return f.get(); }); - return f.get(); } else if (what == "mon_status") { - cluster_state.with_mon_status( - [&f, &tstate](const ceph::bufferlist &mon_status_json) { - PyEval_RestoreThread(tstate); + return cluster_state.with_mon_status( + [&](const ceph::bufferlist &mon_status_json) { + with_gil_t with_gil{no_gil}; f.dump_string("json", mon_status_json.to_str()); + return f.get(); }); - return f.get(); } else if (what == "mgr_map") { - cluster_state.with_mgrmap([&f, &tstate](const MgrMap &mgr_map) { - PyEval_RestoreThread(tstate); + return cluster_state.with_mgrmap([&](const MgrMap &mgr_map) { + with_gil_t with_gil{no_gil}; mgr_map.dump(&f); + return f.get(); }); - return f.get(); } else { derr << "Python module requested unknown data '" << what << "'" << dendl; - PyEval_RestoreThread(tstate); + with_gil_t with_gil{no_gil}; Py_RETURN_NONE; } } @@ -522,9 +565,8 @@ void ActivePyModules::notify_all(const LogEntry &log_entry) bool ActivePyModules::get_store(const std::string &module_name, const std::string &key, std::string *val) const { - PyThreadState *tstate = PyEval_SaveThread(); + without_gil_t no_gil; std::lock_guard l(lock); - PyEval_RestoreThread(tstate); const std::string global_key = PyModule::config_prefix + module_name + "/" + key; @@ -577,7 +619,7 @@ PyObject *ActivePyModules::get_typed_config( const std::string &key, const std::string &prefix) const { - PyThreadState *tstate = PyEval_SaveThread(); + without_gil_t no_gil; std::string value; std::string final_key; bool found = false; @@ -591,7 +633,7 @@ PyObject *ActivePyModules::get_typed_config( } if (found) { PyModuleRef module = py_module_registry.get_module(module_name); - PyEval_RestoreThread(tstate); + with_gil_t with_gil{no_gil}; if (!module) { derr << "Module '" << 
module_name << "' is not available" << dendl; Py_RETURN_NONE; @@ -602,37 +644,36 @@ PyObject *ActivePyModules::get_typed_config( dout(10) << __func__ << " " << final_key << " found" << dendl; return module->get_typed_option_value(key, value); } - PyEval_RestoreThread(tstate); if (prefix.size()) { dout(4) << __func__ << " [" << prefix << "/]" << key << " not found " << dendl; } else { dout(4) << __func__ << " " << key << " not found " << dendl; } + with_gil_t with_gil{no_gil}; Py_RETURN_NONE; } PyObject *ActivePyModules::get_store_prefix(const std::string &module_name, const std::string &prefix) const { - PyThreadState *tstate = PyEval_SaveThread(); + without_gil_t no_gil; std::lock_guard l(lock); std::lock_guard lock(module_config.lock); - PyEval_RestoreThread(tstate); const std::string base_prefix = PyModule::config_prefix + module_name + "/"; const std::string global_prefix = base_prefix + prefix; dout(4) << __func__ << " prefix: " << global_prefix << dendl; - PyFormatter f; - - for (auto p = store_cache.lower_bound(global_prefix); - p != store_cache.end() && p->first.find(global_prefix) == 0; - ++p) { - f.dump_string(p->first.c_str() + base_prefix.size(), p->second); - } - return f.get(); + return with_gil(no_gil, [&] { + PyFormatter f; + for (auto p = store_cache.lower_bound(global_prefix); + p != store_cache.end() && p->first.find(global_prefix) == 0; ++p) { + f.dump_string(p->first.c_str() + base_prefix.size(), p->second); + } + return f.get(); + }); } void ActivePyModules::set_store(const std::string &module_name, @@ -703,31 +744,32 @@ PyObject* ActivePyModules::with_perf_counters( const std::string &svc_id, const std::string &path) const { - PyThreadState *tstate = PyEval_SaveThread(); - std::lock_guard l(lock); - PyEval_RestoreThread(tstate); - PyFormatter f; f.open_array_section(path.c_str()); - - auto metadata = daemon_state.get(DaemonKey{svc_name, svc_id}); - if (metadata) { - std::lock_guard l2(metadata->lock); - if (metadata->perf_counters.instances.count(path)) { - auto counter_instance = metadata->perf_counters.instances.at(path); - auto counter_type = metadata->perf_counters.types.at(path); - fct(counter_instance, counter_type, f); - } else { - dout(4) << "Missing counter: '" << path << "' (" - << svc_name << "." << svc_id << ")" << dendl; - dout(20) << "Paths are:" << dendl; - for (const auto &i : metadata->perf_counters.instances) { - dout(20) << i.first << dendl; + { + without_gil_t no_gil; + std::lock_guard l(lock); + auto metadata = daemon_state.get(DaemonKey{svc_name, svc_id}); + if (metadata) { + std::lock_guard l2(metadata->lock); + if (metadata->perf_counters.instances.count(path)) { + auto counter_instance = metadata->perf_counters.instances.at(path); + auto counter_type = metadata->perf_counters.types.at(path); + with_gil(no_gil, [&] { + fct(counter_instance, counter_type, f); + }); + } else { + dout(4) << "Missing counter: '" << path << "' (" + << svc_name << "." << svc_id << ")" << dendl; + dout(20) << "Paths are:" << dendl; + for (const auto &i : metadata->perf_counters.instances) { + dout(20) << i.first << dendl; + } } + } else { + dout(4) << "No daemon state for " << svc_name << "." << svc_id << ")" + << dendl; } - } else { - dout(4) << "No daemon state for " - << svc_name << "." 
<< svc_id << ")" << dendl; } f.close_section(); return f.get(); @@ -793,9 +835,8 @@ PyObject* ActivePyModules::get_perf_schema_python( const std::string &svc_type, const std::string &svc_id) { - PyThreadState *tstate = PyEval_SaveThread(); + without_gil_t no_gil; std::lock_guard l(lock); - PyEval_RestoreThread(tstate); DaemonStateCollection daemons; @@ -812,26 +853,29 @@ PyObject* ActivePyModules::get_perf_schema_python( } } - PyFormatter f; + auto f = with_gil(no_gil, [&] { + return PyFormatter(); + }); if (!daemons.empty()) { for (auto& [key, state] : daemons) { - f.open_object_section(ceph::to_string(key).c_str()); - std::lock_guard l(state->lock); - for (auto ctr_inst_iter : state->perf_counters.instances) { - const auto &counter_name = ctr_inst_iter.first; - f.open_object_section(counter_name.c_str()); - auto type = state->perf_counters.types[counter_name]; - f.dump_string("description", type.description); - if (!type.nick.empty()) { - f.dump_string("nick", type.nick); - } - f.dump_unsigned("type", type.type); - f.dump_unsigned("priority", type.priority); - f.dump_unsigned("units", type.unit); - f.close_section(); - } - f.close_section(); + with_gil(no_gil, [&, key=ceph::to_string(key), state=state] { + f.open_object_section(key.c_str()); + for (auto ctr_inst_iter : state->perf_counters.instances) { + const auto &counter_name = ctr_inst_iter.first; + f.open_object_section(counter_name.c_str()); + auto type = state->perf_counters.types[counter_name]; + f.dump_string("description", type.description); + if (!type.nick.empty()) { + f.dump_string("nick", type.nick); + } + f.dump_unsigned("type", type.type); + f.dump_unsigned("priority", type.priority); + f.dump_unsigned("units", type.unit); + f.close_section(); + } + f.close_section(); + }); } } else { dout(4) << __func__ << ": No daemon state found for " @@ -842,10 +886,9 @@ PyObject* ActivePyModules::get_perf_schema_python( PyObject *ActivePyModules::get_context() { - PyThreadState *tstate = PyEval_SaveThread(); - std::lock_guard l(lock); - PyEval_RestoreThread(tstate); - + auto l = without_gil([&] { + return std::lock_guard(lock); + }); // Construct a capsule containing ceph context. // Not incrementing/decrementing ref count on the context because // it's the global one and it has process lifetime. 
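A note on the `auto l = without_gil([&] { return std::lock_guard(lock); });` pattern used in get_metadata_python(), get_daemon_status_python() and get_context() above: it relies on two C++17 features, class template argument deduction (std::lock_guard with no explicit template argument) and guaranteed copy elision, which constructs the non-copyable, non-movable guard directly in the caller's variable, so the mutex stays held after the GIL has been re-acquired. A minimal standalone sketch of the idea, with the GIL handling reduced to comments and run_without_gil() as a hypothetical stand-in for the without_gil() helper:

#include <functional>   // std::invoke
#include <mutex>
#include <utility>      // std::forward

std::mutex m;            // stand-in for the mgr-side, non-GIL mutex

template <typename Func>
auto run_without_gil(Func&& func)
{
  // the real wrapper calls PyEval_SaveThread() here ...
  return std::invoke(std::forward<Func>(func));
  // ... and PyEval_RestoreThread() in a destructor, after the result object
  // (the lock_guard) has already been constructed in the caller's storage
}

void example()
{
  // guaranteed copy elision: the lock_guard is constructed directly in `l`,
  // even though std::lock_guard can be neither copied nor moved
  auto l = run_without_gil([&] { return std::lock_guard(m); });
  // `m` is locked here and is released only when `l` goes out of scope
}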
@@ -900,16 +943,13 @@ PyObject *construct_with_capsule( PyObject *ActivePyModules::get_osdmap() { - OSDMap *newmap = new OSDMap; - - PyThreadState *tstate = PyEval_SaveThread(); - { + auto newmap = without_gil([&] { + OSDMap *newmap = new OSDMap; cluster_state.with_osdmap([&](const OSDMap& o) { - newmap->deepish_copy_from(o); - }); - } - PyEval_RestoreThread(tstate); - + newmap->deepish_copy_from(o); + }); + return newmap; + }); return construct_with_capsule("mgr_module", "OSDMap", (void*)newmap); } diff --git a/src/mgr/ActivePyModules.h b/src/mgr/ActivePyModules.h index 8a14a4671bfff..48dd43c31166e 100644 --- a/src/mgr/ActivePyModules.h +++ b/src/mgr/ActivePyModules.h @@ -95,6 +95,7 @@ public: const std::string &svc_id); PyObject *get_context(); PyObject *get_osdmap(); + /// @note @c fct is not allowed to acquire locks when holding GIL PyObject *with_perf_counters( std::function - void with_servicemap(Callback&& cb, Args&&...args) const + auto with_servicemap(Callback&& cb, Args&&...args) const { std::lock_guard l(lock); - std::forward(cb)(servicemap, std::forward(args)...); + return std::forward(cb)(servicemap, std::forward(args)...); } template - void with_fsmap(Callback&& cb, Args&&...args) const + auto with_fsmap(Callback&& cb, Args&&...args) const { std::lock_guard l(lock); - std::forward(cb)(fsmap, std::forward(args)...); + return std::forward(cb)(fsmap, std::forward(args)...); } template - void with_mgrmap(Callback&& cb, Args&&...args) const + auto with_mgrmap(Callback&& cb, Args&&...args) const { std::lock_guard l(lock); - std::forward(cb)(mgr_map, std::forward(args)...); + return std::forward(cb)(mgr_map, std::forward(args)...); } template @@ -111,11 +111,11 @@ public: } template - void with_monmap(Args &&... args) const + auto with_monmap(Args &&... args) const { std::lock_guard l(lock); ceph_assert(monc != nullptr); - monc->with_monmap(std::forward(args)...); + return monc->with_monmap(std::forward(args)...); } template @@ -138,17 +138,17 @@ public: } template - void with_health(Callback&& cb, Args&&...args) const + auto with_health(Callback&& cb, Args&&...args) const { std::lock_guard l(lock); - std::forward(cb)(health_json, std::forward(args)...); + return std::forward(cb)(health_json, std::forward(args)...); } template - void with_mon_status(Callback&& cb, Args&&...args) const + auto with_mon_status(Callback&& cb, Args&&...args) const { std::lock_guard l(lock); - std::forward(cb)(mon_status_json, std::forward(args)...); + return std::forward(cb)(mon_status_json, std::forward(args)...); } void final_init(); diff --git a/src/mgr/DaemonState.cc b/src/mgr/DaemonState.cc index 3b2967b2a4be5..ac2989bd15387 100644 --- a/src/mgr/DaemonState.cc +++ b/src/mgr/DaemonState.cc @@ -273,7 +273,7 @@ void DaemonStateIndex::cull_services(const std::set& types_exist) for (auto it = all.begin(); it != all.end(); ++it) { const auto& daemon_key = it->first; if (it->second->service_daemon && - types_exist.count(daemon_key.first) == 0) { + types_exist.count(daemon_key.type) == 0) { victims.insert(daemon_key); } }
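The ClusterState.h hunks above all make the same change: the with_* helpers switch from returning void to a deduced return type, so whatever the callback produces inside the critical section is forwarded to the caller. That is what lets the earlier hunks write `return cluster_state.with_fsmap([&](const FSMap &fsmap) { ...; return f.get(); });` directly. A self-contained sketch of that shape, using illustrative names (Cluster, FSMapLike) rather than the real mgr types:

#include <mutex>
#include <utility>

struct FSMapLike { int epoch = 42; };   // stand-in for FSMap

class Cluster {                          // stand-in for ClusterState
  mutable std::mutex lock;
  FSMapLike fsmap;
public:
  // before: void with_fsmap(Callback&&) -- results had to be smuggled out
  // through captured references; after: the callback's return value is
  // forwarded, still computed while `lock` is held
  template <typename Callback, typename... Args>
  auto with_fsmap(Callback&& cb, Args&&... args) const
  {
    std::lock_guard l(lock);
    return std::forward<Callback>(cb)(fsmap, std::forward<Args>(args)...);
  }
};

int example(const Cluster& cluster)
{
  // the epoch is read under the lock and returned straight to the caller
  return cluster.with_fsmap([](const FSMapLike& m) { return m.epoch; });
}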