#include "global/pidfile.h"
#include "osd.h"
+SET_SUBSYS(osd);
+
using namespace std::literals;
namespace bpo = boost::program_options;
using crimson::common::local_conf;
int main(int argc, const char* argv[])
{
+ LOG_PREFIX(OSD::main);
+
+ INFO("parsing early config");
auto early_config_result = crimson::osd::get_early_config(argc, argv);
if (!early_config_result.has_value()) {
int r = early_config_result.error();
return r;
}
auto &early_config = early_config_result.value();
+ INFO("early config parsed successfully");
auto seastar_n_early_args = early_config.get_early_args();
auto config_proxy_args = early_config.get_ceph_args();
+ INFO("initializing seastar app_template");
seastar::app_template::config app_cfg;
app_cfg.name = "Crimson";
app_cfg.auto_handle_sigint_sigterm = false;
seastar::app_template app(std::move(app_cfg));
+
+ INFO("registering CLI options");
app.add_options()
("mkkey", "generate a new secret key. "
"This is normally used in combination with --mkfs")
"Prometheus metrics prefix");
try {
+ INFO("entering seastar runtime");
return app.run(
seastar_n_early_args.size(),
const_cast<char**>(seastar_n_early_args.data()),
auto& config = app.configuration();
return seastar::async([&] {
try {
+ INFO("seastar runtime started");
+
FatalSignal fatal_signal;
seastar_apps_lib::stop_signal should_stop;
+
if (config.count("debug")) {
+ INFO("enabling debug logging");
seastar::global_logger_registry().set_all_loggers_level(
seastar::log_level::debug
);
}
if (config.count("trace")) {
+ INFO("enabling trace logging");
seastar::global_logger_registry().set_all_loggers_level(
seastar::log_level::trace
);
}
+
+ DEBUG("starting sharded config service");
sharded_conf().start(
early_config.init_params.name, early_config.cluster_name).get();
local_conf().start().get();
auto stop_conf = seastar::deferred_stop(sharded_conf());
+
+ DEBUG("starting performance counters");
sharded_perf_coll().start().get();
auto stop_perf_coll = seastar::deferred_stop(sharded_perf_coll());
+
+ DEBUG("parsing config files");
local_conf().parse_config_files(early_config.conf_file_list).get();
local_conf().parse_env().get();
local_conf().parse_argv(config_proxy_args).get();
+
+ DEBUG("initializing logger output");
auto log_file_stream = maybe_set_logger();
auto reset_logger = seastar::defer([] {
logger().set_ostream(std::cerr);
});
+
+ DEBUG("writing pidfile");
if (const auto ret = pidfile_write(local_conf()->pid_file);
ret == -EACCES || ret == -EAGAIN) {
ceph_abort_msg(fmt::format("pidfile_write failed with {} {}",
ret, cpp_strerror(-ret)));
}
+
+ DEBUG("setting ignore SIGHUP");
// just ignore SIGHUP, we don't reread settings. keep in mind signals
// handled by S* must be blocked for alien threads (see AlienStore).
seastar::handle_signal(SIGHUP, [] {});
std::any stop_prometheus;
if (uint16_t prom_port = config["prometheus_port"].as<uint16_t>();
prom_port != 0) {
+
+ DEBUG("starting prometheus server on port {}", prom_port);
prom_server.start("prometheus").get();
stop_prometheus = seastar::make_shared(seastar::deferred_stop(prom_server));
}).get();
}
+ DEBUG("creating messengers");
const int whoami = std::stoi(local_conf()->name.get_id());
const auto nonce = crimson::osd::get_nonce();
crimson::net::MessengerRef cluster_msgr, client_msgr;
nonce,
true);
}
+
+ DEBUG("creating object store");
auto store = crimson::os::FuturizedStore::create(
local_conf().get_val<std::string>("osd_objectstore"),
local_conf().get_val<std::string>("osd_data"),
local_conf().get_config_values());
- logger().info("passed objectstore is {}", local_conf().get_val<std::string>("osd_objectstore"));
+ INFO("passed objectstore is {}", local_conf().get_val<std::string>("osd_objectstore"));
+
crimson::osd::OSD osd(
whoami, nonce, std::ref(should_stop.abort_source()),
std::ref(*store), cluster_msgr, client_msgr,
hb_front_msgr, hb_back_msgr);
if (config.count("mkkey")) {
+ DEBUG("generating keyring");
make_keyring().get();
}
+
if (local_conf()->no_mon_config) {
- logger().info("bypassing the config fetch due to --no-mon-config");
+ INFO("bypassing the config fetch due to --no-mon-config");
} else {
+ DEBUG("fetching config from monitors");
crimson::osd::populate_config_from_mon().get();
}
if (config.count("mkfs")) {
+ DEBUG("running mkfs");
auto osd_uuid = local_conf().get_val<uuid_d>("osd_uuid");
if (osd_uuid.is_zero()) {
+ DEBUG("uuid not specified, generating random osd uuid");
// use a random osd uuid if not specified
osd_uuid.generate_random();
}
config["osdspec-affinity"].as<std::string>()).get();
}
if (config.count("mkkey") || config.count("mkfs")) {
+ DEBUG("exiting, mkkey {}, mkfs {}", config.count("mkkey"), config.count("mkfs"));
return EXIT_SUCCESS;
} else {
+ DEBUG("starting OSD services");
osd.start().get();
}
- logger().info("crimson startup completed");
+ INFO("crimson startup completed");
+
should_stop.wait().get();
- logger().info("crimson shutting down");
+ INFO("crimson shutting down");
osd.stop().get();
// stop()s registered using defer() are called here
} catch (...) {
logger().error("startup failed: {}", std::current_exception());
return EXIT_FAILURE;
}
- logger().info("crimson shutdown complete");
+ INFO("crimson shutdown complete");
return EXIT_SUCCESS;
});
});
clog(log_client.create_channel())
{
LOG_PREFIX(OSD::OSD);
+ DEBUG("");
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
for (auto msgr : {std::ref(cluster_msgr), std::ref(public_msgr),
std::ref(hb_front_msgr), std::ref(hb_back_msgr)}) {
seastar::future<> OSD::open_meta_coll()
{
+ LOG_PREFIX(OSD::open_meta_coll);
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
+ DEBUG("opening metadata collection");
return store.get_sharded_store().open_collection(
coll_t::meta()
- ).then([this](auto ch) {
+ ).then([this, FNAME](auto ch) {
+ DEBUG("registering metadata collection");
pg_shard_manager.init_meta_coll(ch, store.get_sharded_store());
return seastar::now();
});
seastar::future<OSDMeta> OSD::open_or_create_meta_coll(FuturizedStore &store)
{
- return store.get_sharded_store().open_collection(coll_t::meta()).then([&store](auto ch) {
+ LOG_PREFIX(OSD::open_or_create_meta_coll);
+ DEBUG("");
+ return store.get_sharded_store().open_collection(coll_t::meta()).then([&store, FNAME](auto ch) {
if (!ch) {
+ DEBUG("creating new metadata collection");
return store.get_sharded_store().create_new_collection(
coll_t::meta()
).then([&store](auto ch) {
return OSDMeta(ch, store.get_sharded_store());
});
} else {
+ DEBUG("meta collection already exists");
return seastar::make_ready_future<OSDMeta>(ch, store.get_sharded_store());
}
});
std::string osdspec_affinity)
{
LOG_PREFIX(OSD::mkfs);
-
+ DEBUG("starting store mkfs");
co_await store.start();
+ DEBUG("calling store mkfs");
co_await store.mkfs(osd_uuid).handle_error(
crimson::stateful_ec::assert_failure(fmt::format(
"{} error creating empty object store in {}",
FNAME, local_conf().get_val<std::string>("osd_data")).c_str())
);
+ DEBUG("mounting store after mkfs");
co_await store.mount().handle_error(
crimson::stateful_ec::assert_failure(fmt::format(
"{} error mounting object store in {}",
std::move(meta_coll),
std::move(superblock),
[&store, FNAME](auto &meta_coll, auto &superblock) {
+ DEBUG("try loading existing superblock");
return meta_coll.load_superblock(
).safe_then([&superblock, FNAME](OSDSuperblock&& sb) {
if (sb.cluster_fsid != superblock.cluster_fsid) {
crimson::ct_error::enoent::handle([&store, &meta_coll, &superblock,
FNAME] {
// meta collection does not yet exist, create superblock
- INFO("{} writing superblock cluster_fsid {} osd_fsid {}",
- "_write_superblock",
+ INFO("writing superblock cluster_fsid {} osd_fsid {}",
superblock.cluster_fsid,
superblock.osd_fsid);
ceph::os::Transaction t;
meta_coll.create(t);
meta_coll.store_superblock(t, superblock);
- DEBUG("OSD::_write_superblock: do_transaction...");
+ DEBUG("do_transaction: create meta collection and store superblock");
return store.get_sharded_store().do_transaction(
meta_coll.collection(),
std::move(t));
}),
- crimson::ct_error::assert_all("_write_superbock error")
+ crimson::ct_error::assert_all("_write_superblock error")
);
});
}
}
startup_time = ceph::mono_clock::now();
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
+ DEBUG("starting store");
return store.start().then([this] {
return pg_to_shard_mappings.start(0, seastar::smp::count
).then([this] {
heartbeat.reset(new Heartbeat{
whoami, get_shard_services(),
*monc, *hb_front_msgr, *hb_back_msgr});
+ DEBUG("mounting store");
return store.mount().handle_error(
crimson::stateful_ec::assert_failure(fmt::format(
"{} error mounting object store in {}",
stats_timer.arm_periodic(std::chrono::seconds(stats_seconds));
}
+ DEBUG("open metadata collection");
return open_meta_coll();
- }).then([this] {
+ }).then([this, FNAME] {
+ DEBUG("loading superblock");
return pg_shard_manager.get_meta_coll().load_superblock(
).handle_error(
crimson::ct_error::assert_all("open_meta_coll error")
return shard_services.invoke_on_all([this](auto &local_service) {
local_service.local_state.osdmap_gate.got_map(osdmap->get_epoch());
});
- }).then([this] {
+ }).then([this, FNAME] {
bind_epoch = osdmap->get_epoch();
+ DEBUG("loading PGs");
return pg_shard_manager.load_pgs(store);
}).then([this, FNAME] {
uint64_t osd_required =
[FNAME] (const std::error_code& e) {
ERROR("public messenger bind(): {}", e);
})));
- }).then_unpack([this] {
+ }).then_unpack([this, FNAME] {
+ DEBUG("starting mon and mgr clients");
return seastar::when_all_succeed(monc->start(),
mgrc->start());
- }).then_unpack([this] {
+ }).then_unpack([this, FNAME] {
+ DEBUG("adding to crush");
return _add_me_to_crush();
}).then([this] {
return _add_device_class();
return start_asok_admin();
}).then([this] {
return log_client.set_fsid(monc->get_fsid());
- }).then([this] {
+ }).then([this, FNAME] {
+ DEBUG("starting boot");
return start_boot();
});
}