int NVMeofGwMonitorClient::init()
{
+ dout(1) << "=== NVMeofGwMonitorClient::init() STARTED ===" << dendl;
dout(10) << dendl;
std::string val;
auto args = argv_to_vec(orig_argc, orig_argv);
}
}
- dout(10) << "gateway name: " << name <<
+ dout(1) << "Configuration parsed - gateway name: " << name <<
" pool:" << pool <<
" group:" << group <<
" address: " << gateway_address << dendl;
ceph_assert((server_cert.empty() == client_key.empty()) && (client_key.empty() == client_cert.empty()));
init_gw_ssl_opts();
+ dout(1) << "About to initialize signal handlers" << dendl;
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
+ dout(1) << "Signal handlers initialized" << dendl;
std::lock_guard l(lock);
+ dout(1) << "Acquired main lock, starting messenger initialization" << dendl;
// Initialize Messenger
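+ // The Objecter sits at the head of the dispatcher chain so it sees messages
+ // first; this class handles whatever falls through to the tail.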
client_messenger->add_dispatcher_tail(this);
client_messenger->add_dispatcher_head(&objecter);
client_messenger->start();
+ dout(1) << "Messenger started" << dendl;
poolctx.start(2);
+ dout(1) << "Pool context started" << dendl;
// Initialize MonClient
+ dout(1) << "About to build initial monmap" << dendl;
if (monc.build_initial_monmap() < 0) {
client_messenger->shutdown();
client_messenger->wait();
return -1;
}
+ dout(1) << "Initial monmap built successfully" << dendl;
monc.sub_want("NVMeofGw", 0, 0);
monc.set_want_keys(CEPH_ENTITY_TYPE_MON|CEPH_ENTITY_TYPE_OSD);
monc.set_messenger(client_messenger.get());
+ dout(1) << "MonClient configured" << dendl;
// We must register our config callback before calling init(), so
// that we see the initial configuration message
});
dout(4) << "nvmeof Registered monc callback" << dendl;
+ dout(1) << "About to call monc.init()" << dendl;
int r = monc.init();
+ dout(1) << "monc.init() returned: " << r << dendl;
if (r < 0) {
monc.shutdown();
client_messenger->shutdown();
+ client_messenger->wait();
+ return r;
}
dout(10) << "nvmeof Registered monc callback" << dendl;
+ dout(1) << "About to call monc.authenticate()" << dendl;
r = monc.authenticate();
+ dout(1) << "monc.authenticate() returned: " << r << dendl;
if (r < 0) {
derr << "Authentication failed, did you specify an ID with a valid keyring?" << dendl;
monc.shutdown();
+ client_messenger->shutdown();
client_messenger->wait();
return r;
}
- dout(10) << "monc.authentication done" << dendl;
+ dout(1) << "monc.authentication done" << dendl;
monc.set_passthrough_monmap();
client_t whoami = monc.get_global_id();
objecter.init();
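+ // Have the Objecter track OSDMap blocklist updates so we can detect if this
+ // client gets blocklisted.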
objecter.enable_blocklist_events();
objecter.start();
+ dout(1) << "Starting timer initialization" << dendl;
timer.init();
{
std::lock_guard bl(beacon_lock);
- tick();
+ // Initialize timer state for exact frequency timing
+ next_tick_time = ceph::mono_clock::now();
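+ // The first beacon goes out on the first scheduled tick, one tick period
+ // after init() completes.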
+ auto tick_period = g_conf().get_val<std::chrono::seconds>("nvmeof_mon_client_tick_period");
+ dout(1) << "Initializing timer with period " << tick_period.count() << " seconds" << dendl;
}
- dout(10) << "Complete." << dendl;
+ dout(1) << "Monitor client initialization completed successfully" << dendl;
+
+ // Start the timer after full initialization is complete
+ dout(1) << "Starting timer after full initialization" << dendl;
+ start_timer();
+
return 0;
}
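+ // Check for a prolonged monitor disconnect (disconnect_panic) and then send
+ // this gateway's beacon to the monitors.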
disconnect_panic();
send_beacon();
first_beacon = false;
- timer.add_event_after(
- g_conf().get_val<std::chrono::seconds>("nvmeof_mon_client_tick_period").count(),
- new LambdaContext([this](int r){
- tick();
- }
- ));
+
+ // Schedule next tick with exact frequency timing
+ schedule_next_tick();
+}
+
+void NVMeofGwMonitorClient::start_timer()
+{
+ std::lock_guard bl(beacon_lock);
+ dout(1) << "start_timer() called" << dendl;
+
+ dout(1) << "About to call schedule_next_tick() from start_timer()" << dendl;
+ schedule_next_tick();
+ dout(1) << "schedule_next_tick() completed successfully from start_timer()" << dendl;
+}
+
+void NVMeofGwMonitorClient::schedule_next_tick()
+{
+ ceph_assert(ceph_mutex_is_locked_by_me(beacon_lock));
+ dout(1) << "schedule_next_tick() called" << dendl;
+
+ // Calculate next tick time based on configured interval
+ auto tick_period = g_conf().get_val<std::chrono::seconds>("nvmeof_mon_client_tick_period");
+ next_tick_time += tick_period;
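+ // Advance from the previous target time rather than from "now" so ticks run
+ // at a fixed rate and per-tick scheduling latency does not accumulate.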
+
+ // Get current time to check for drift
+ auto now = ceph::mono_clock::now();
+
+ // If we're behind schedule, adjust next_tick_time to prevent drift accumulation
+ if (next_tick_time < now) {
+ dout(1) << "Timer drift detected, adjusting next_tick_time from "
+ << next_tick_time << " to " << now << dendl;
+ next_tick_time = now;
+ }
+
+ // Schedule the next tick
+ auto callback = new LambdaContext([this](int r) {
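+ // Assumption: the timer invokes callbacks without beacon_lock held (i.e. a
+ // SafeTimer created with safe_callbacks=false), so the guard below re-takes
+ // the lock for tick(). If the timer instead held the lock across callbacks,
+ // this would self-deadlock.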
+ std::lock_guard bl(beacon_lock);
+ tick();
+ });
+
+ auto tick_time = next_tick_time;
+
+ // timer.add_event_at() must be called with beacon_lock held; SafeTimer
+ // asserts this in common/Timer.cc.
+ if (!timer.add_event_at(tick_time, callback)) {
+ dout(1) << "Failed to schedule timer event (timer shutting down)" << dendl;
+ return;
+ }
+
+ auto delay_ns = (next_tick_time - now).count();
+ dout(1) << "Rescheduled next tick at " << next_tick_time
+ << " (delay: " << delay_ns << " ns, "
+ << (delay_ns / 1000000.0) << " ms)" << dendl;
}
+
void NVMeofGwMonitorClient::shutdown()
{
std::lock_guard l(lock);