}));
}
-void ActivePyModules::shutdown()
-{
- std::lock_guard locker(lock);
-
- // Stop per active module finisher thread
- for (auto& [name, module] : modules) {
- dout(4) << "Stopping active module " << name << " finisher thread" << dendl;
- module->finisher.wait_for_empty();
- module->finisher.stop();
- }
-
- // Signal modules to drop out of serve() and/or tear down resources
- for (auto& [name, module] : modules) {
- lock.unlock();
- dout(10) << "calling module " << name << " shutdown()" << dendl;
- module->shutdown();
- dout(10) << "module " << name << " shutdown() returned" << dendl;
- lock.lock();
- }
-
- // For modules implementing serve(), finish the threads where we
- // were running that.
- for (auto& [name, module] : modules) {
- lock.unlock();
- dout(10) << "joining module " << name << dendl;
- module->thread.join();
- dout(10) << "joined module " << name << dendl;
- lock.lock();
- }
-
- cmd_finisher.wait_for_empty();
- cmd_finisher.stop();
-
- modules.clear();
-}
-
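The removed ActivePyModules::shutdown() drops its lock around every call into a module: a module's shutdown() handler (or its serve() thread, on join) can re-enter ActivePyModules through methods that take the same mutex, so holding it across the call would self-deadlock. A minimal sketch of that unlock-around-callback pattern, using illustrative names rather than the Ceph types, and std::unique_lock in place of the lock_guard-plus-manual-unlock above:

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct Module {
      void shutdown() { /* may call back into code that locks `lock` */ }
    };

    std::mutex lock;
    std::map<std::string, std::unique_ptr<Module>> modules;

    void shutdown_all()
    {
      std::unique_lock locker(lock);
      for (auto& [name, module] : modules) {
        // Release the lock across the callback so a re-entrant call
        // from the module cannot deadlock against us.
        locker.unlock();
        module->shutdown();
        locker.lock();
        // Only safe because nothing mutates `modules` while we are
        // shutting down; otherwise the iterator could be invalidated
        // during the unlocked window.
      }
    }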
void ActivePyModules::notify_all(const std::string &notify_type,
const std::string &notify_id)
{
std::string *err);
int init();
- void shutdown();
void start_one(PyModuleRef py_module);
ceph_assert(r == 0);
}
-void ClusterState::shutdown()
-{
- // unregister commands
- g_ceph_context->get_admin_socket()->unregister_commands(asok_hook);
- delete asok_hook;
- asok_hook = NULL;
-}
-
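The order in the removed ClusterState::shutdown() is deliberate: the hook is unregistered from the admin socket before being deleted, so no in-flight asok command can execute against a freed object. A sketch of the idiom with stand-in types (CommandRegistry and Hook are illustrative, not the Ceph AdminSocket API):

    struct Hook {
      virtual ~Hook() = default;
    };

    struct CommandRegistry {
      // Assumed semantics: returns only once no thread is still
      // dispatching a command through `h`, and no new dispatch starts.
      void unregister_commands(const Hook* h) { (void)h; }
    };

    void shutdown_hook(CommandRegistry& registry, Hook*& hook)
    {
      registry.unregister_commands(hook); // 1. quiesce dispatch first
      delete hook;                        // 2. only then free the hook
      hook = nullptr;                     // 3. leave no dangling pointer
    }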
bool ClusterState::asok_command(
std::string_view admin_command,
const cmdmap_t& cmdmap,
}
void final_init();
- void shutdown();
bool asok_command(std::string_view admin_command,
const cmdmap_t& cmdmap,
Formatter *f,
audit_clog(audit_clog_),
pgmap_ready(false),
timer(g_ceph_context, lock),
- shutting_down(false),
tick_event(nullptr),
osd_perf_metric_collector_listener(this),
osd_perf_metric_collector(osd_perf_metric_collector_listener),
tick_event = nullptr;
}
- // on shutdown start rejecting explicit requests to send reports that may
- // originate from python land which may still be running.
- if (shutting_down)
- return;
-
tick_event = timer.add_event_after(delay_sec,
new LambdaContext([this](int r) {
tick();
}));
}
-void DaemonServer::shutdown()
-{
- dout(10) << "begin" << dendl;
- msgr->shutdown();
- msgr->wait();
- cluster_state.shutdown();
- dout(10) << "done" << dendl;
-
- std::lock_guard l(lock);
- shutting_down = true;
- timer.shutdown();
-}
-
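Together, the guard removed from schedule_tick_locked() and the flag set in the removed DaemonServer::shutdown() close a race: tick() re-arms itself through the timer, and report requests can still arrive from Python land during teardown, so shutdown must both cancel queued events and refuse new ones. A reduced sketch of the flag-plus-timer idiom, with the SafeTimer calls stubbed out as comments:

    #include <mutex>

    std::mutex lock;
    bool shutting_down = false;

    void schedule_tick_locked(double delay_sec)
    {
      (void)delay_sec;  // consumed by the real timer call below
      // Once shutdown has started, refuse to re-arm: a late event
      // would fire after timer.shutdown() against torn-down state.
      if (shutting_down)
        return;
      // tick_event = timer.add_event_after(delay_sec, new tick context);
    }

    void shutdown()
    {
      std::lock_guard l(lock);
      shutting_down = true;  // set under the lock the tick path holds
      // timer.shutdown();   // then cancel anything already queued
    }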
static DaemonKey key_from_service(
const std::string& service_name,
int peer_type,
void maybe_ready(int32_t osd_id);
SafeTimer timer;
- bool shutting_down;
Context *tick_event;
void tick();
void schedule_tick_locked(double delay_sec);
public:
int init(uint64_t gid, entity_addrvec_t client_addrs);
- void shutdown();
entity_addrvec_t get_myaddrs() const;
return loaded;
}
-void Mgr::handle_signal(int signum)
-{
- ceph_assert(signum == SIGINT || signum == SIGTERM);
- shutdown();
-}
-
static void handle_mgr_signal(int signum)
{
derr << " *** Got signal " << sig_str(signum) << " ***" << dendl;
}
}
-
-void Mgr::shutdown()
-{
- dout(10) << "mgr shutdown init" << dendl;
- finisher.queue(new LambdaContext([&](int) {
- {
- std::lock_guard l(lock);
- // First stop the server so that we're not taking any more incoming
- // requests
- server.shutdown();
- }
- // after the messenger is stopped, signal modules to shut down via finisher
- py_module_registry->active_shutdown();
- }));
-
- // Then stop the finisher to ensure its enqueued contexts aren't going
- // to touch references to the things we're about to tear down
- finisher.wait_for_empty();
- finisher.stop();
-}
-
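The removed Mgr::shutdown() relies on a Finisher idiom: the actual teardown is queued onto the finisher thread, and the caller then drains and stops the finisher, which doubles as the synchronization point, since wait_for_empty() cannot return until the queued context has finished. A compilable sketch of that queue/drain/stop contract with a stand-in class built from standard primitives (Ceph's Finisher exposes a similar surface, but this is not its implementation):

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <thread>

    class MiniFinisher {
      std::mutex m;
      std::condition_variable cv;
      std::deque<std::function<void()>> q;
      bool stopping = false;
      int in_flight = 0;
      std::thread worker;

      void run() {
        std::unique_lock l(m);
        while (!stopping || !q.empty()) {
          cv.wait(l, [&] { return stopping || !q.empty(); });
          while (!q.empty()) {
            auto fn = std::move(q.front());
            q.pop_front();
            ++in_flight;
            l.unlock();
            fn();             // run the context outside the lock
            l.lock();
            --in_flight;
            cv.notify_all();  // wake wait_for_empty()
          }
        }
      }

    public:
      MiniFinisher() : worker([this] { run(); }) {}
      void queue(std::function<void()> fn) {
        std::lock_guard l(m);
        q.push_back(std::move(fn));
        cv.notify_all();
      }
      void wait_for_empty() {  // blocks until queued work has *finished*
        std::unique_lock l(m);
        cv.wait(l, [&] { return q.empty() && in_flight == 0; });
      }
      void stop() {
        { std::lock_guard l(m); stopping = true; }
        cv.notify_all();
        worker.join();
      }
    };

    int main() {
      MiniFinisher finisher;
      finisher.queue([] { /* stop server, then modules, as above */ });
      finisher.wait_for_empty();  // returns only after the lambda ran
      finisher.stop();
    }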
void Mgr::handle_osd_map()
{
ceph_assert(ceph_mutex_is_locked_by_me(lock));
bool ms_dispatch2(const ceph::ref_t<Message>& m);
void background_init(Context *completion);
- void shutdown();
-
- void handle_signal(int signum);
std::map<std::string, std::string> get_services() const;
));
}
-void MgrStandby::shutdown()
-{
- finisher.queue(new LambdaContext([&](int) {
- std::lock_guard l(lock);
-
- dout(4) << "Shutting down" << dendl;
-
- py_module_registry.shutdown();
- // stop sending the beacon first; it uses monc to talk to the monitors
- timer.shutdown();
- // client uses monc and objecter
- client.shutdown();
- mgrc.shutdown();
- // Stop asio threads, so leftover events won't call into the already
- // shut-down monclient/objecter.
- poolctx.finish();
- // stop monc, so mon won't be able to instruct me to shutdown/activate after
- // the active_mgr is stopped
- monc.shutdown();
- if (active_mgr) {
- active_mgr->shutdown();
- }
- // objecter is used by monc and active_mgr
- objecter.shutdown();
- // client_messenger is used by all of them, so stop it in the end
- client_messenger->shutdown();
- }));
-
- // Then stop the finisher to ensure its enqueued contexts aren't going
- // to touch references to the things we're about to tear down
- finisher.wait_for_empty();
- finisher.stop();
- mgr_perf_stop(g_ceph_context);
-}
-
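The comment trail in the removed MgrStandby::shutdown() encodes one rule: components are stopped in reverse dependency order, so a user of a service is always quiesced before the service itself (the beacon before monc, monc and the Objecter before the shared messenger). An illustrative reduction with stand-in types, not the real Ceph classes:

    struct Messenger { void shutdown() {} };
    struct MonClient { Messenger* msgr = nullptr; void shutdown() {} };
    struct Objecter  { Messenger* msgr = nullptr; void shutdown() {} };

    void teardown(MonClient& monc, Objecter& objecter, Messenger& msgr)
    {
      monc.shutdown();     // stop the messenger's users first...
      objecter.shutdown();
      msgr.shutdown();     // ...and the shared transport last, since
                           // both components above may send through it
                           // until their own shutdown returns
    }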
void MgrStandby::respawn()
{
// --- WARNING TO FUTURE COPY/PASTERS ---
bool ms_handle_refused(Connection *con) override;
int init();
- void shutdown();
void respawn();
int main(std::vector<const char *> args);
void tick();
}
}
-void PyModuleRegistry::active_shutdown()
-{
- std::lock_guard locker(lock);
-
- if (active_modules != nullptr) {
- active_modules->shutdown();
- active_modules.reset();
- }
-}
-
-void PyModuleRegistry::shutdown()
-{
- std::lock_guard locker(lock);
-
- if (standby_modules != nullptr) {
- standby_modules->shutdown();
- standby_modules.reset();
- }
-
- // Ideally, now, we'd be able to do this for all modules:
- //
- // Py_EndInterpreter(pMyThreadState);
- // PyThreadState_Swap(pMainThreadState);
- //
- // Unfortunately, if the module has any other *python* threads active
- // at this point, Py_EndInterpreter() will abort with:
- //
- // Fatal Python error: Py_EndInterpreter: not the last thread
- //
- // This can happen when using CherryPy in a module, because CherryPy
- // runs an extra thread as a timeout monitor, which spends most of its
- // life inside a time.sleep(60). Unless you are very, very lucky with
- // the timing when calling this destructor, that thread will still be
- // stuck in a sleep, and Py_EndInterpreter() will abort.
- //
- // This could of course also happen with a poorly written module which
- // made no attempt to clean up any additional threads it created.
- //
- // The safest thing to do is just not call Py_EndInterpreter(), and
- // let Py_Finalize() kill everything after all modules are shut down.
-
- modules.clear();
-
- PyEval_RestoreThread(pMainThreadState);
- Py_Finalize();
-}
-
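The removed comment describes a genuine CPython constraint: Py_EndInterpreter() aborts with "Fatal Python error: Py_EndInterpreter: not the last thread" if the sub-interpreter still owns live Python threads, while Py_Finalize(), entered from the main interpreter's thread state, reaps everything at once. A minimal sketch of that safe sequence against the CPython C API, error handling omitted (the registry uses PyEval_RestoreThread because it had released the GIL; here the GIL is held throughout, so PyThreadState_Swap suffices):

    #include <Python.h>

    int main()
    {
      Py_Initialize();
      PyThreadState* main_state = PyThreadState_Get();

      // One sub-interpreter per module, as the registry does.
      PyThreadState* sub = Py_NewInterpreter();

      // ... run module code here; it may start threads (e.g. CherryPy's
      // timeout monitor) that cannot reliably be joined at shutdown ...

      // Py_EndInterpreter(sub) would abort if any such thread is still
      // alive, so it is deliberately skipped.
      (void)sub;

      // Swap back to the main interpreter and let Py_Finalize() tear
      // down every remaining thread state and interpreter in one shot.
      PyThreadState_Swap(main_state);
      Py_Finalize();
      return 0;
    }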
std::vector<std::string> PyModuleRegistry::probe_modules(const std::string &path) const
{
const auto opt = g_conf().get_val<std::string>("mgr_disabled_modules");
return standby_modules != nullptr;
}
- void active_shutdown();
- void shutdown();
-
std::vector<MonCommand> get_commands() const;
std::vector<ModuleCommand> get_py_commands() const;