common/ceph_frag.cc
common/options.cc
common/config.cc
+ common/config_values.cc
common/utf8.c
common/mime.c
common/strtol.cc
int KeyRing::from_ceph_context(CephContext *cct)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
string filename;
int ret = ceph_resolve_file_search(conf->keyring, filename);
int check_mon_data_exists()
{
- string mon_data = g_conf->mon_data;
+ string mon_data = g_conf()->mon_data;
struct stat buf;
if (::stat(mon_data.c_str(), &buf)) {
if (errno != ENOENT) {
*/
int check_mon_data_empty()
{
- string mon_data = g_conf->mon_data;
+ string mon_data = g_conf()->mon_data;
DIR *dir = ::opendir(mon_data.c_str());
if (!dir) {
exit(1);
}
- if (g_conf->mon_data.empty()) {
+ if (g_conf()->mon_data.empty()) {
cerr << "must specify '--mon-data=foo' data path" << std::endl;
exit(1);
}
- if (g_conf->name.get_id().empty()) {
+ if (g_conf()->name.get_id().empty()) {
cerr << "must specify id (--id <id> or --name mon.<id>)" << std::endl;
exit(1);
}
int err = check_mon_data_exists();
if (err == -ENOENT) {
- if (::mkdir(g_conf->mon_data.c_str(), 0755)) {
- derr << "mkdir(" << g_conf->mon_data << ") : "
+ if (::mkdir(g_conf()->mon_data.c_str(), 0755)) {
+ derr << "mkdir(" << g_conf()->mon_data << ") : "
<< cpp_strerror(errno) << dendl;
exit(1);
}
} else if (err < 0) {
- derr << "error opening '" << g_conf->mon_data << "': "
+ derr << "error opening '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
err = check_mon_data_empty();
if (err == -ENOTEMPTY) {
// Mon may exist. Let the user know and exit gracefully.
- derr << "'" << g_conf->mon_data << "' already exists and is not empty"
+ derr << "'" << g_conf()->mon_data << "' already exists and is not empty"
<< ": monitor may already exist" << dendl;
exit(0);
} else if (err < 0) {
- derr << "error checking if '" << g_conf->mon_data << "' is empty: "
+ derr << "error checking if '" << g_conf()->mon_data << "' is empty: "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
MonMap monmap;
// load or generate monmap
- const auto monmap_fn = g_conf->get_val<string>("monmap");
+ const auto monmap_fn = g_conf().get_val<string>("monmap");
if (monmap_fn.length()) {
int err = monmapbl.read_file(monmap_fn.c_str(), &error);
if (err < 0) {
}
// am i part of the initial quorum?
- if (monmap.contains(g_conf->name.get_id())) {
+ if (monmap.contains(g_conf()->name.get_id())) {
// hmm, make sure the ip listed exists on the current host?
// maybe later.
- } else if (!g_conf->public_addr.is_blank_ip()) {
- entity_addr_t a = g_conf->public_addr;
+ } else if (!g_conf()->public_addr.is_blank_ip()) {
+ entity_addr_t a = g_conf()->public_addr;
if (a.get_port() == 0)
a.set_port(CEPH_MON_PORT_LEGACY);
if (monmap.contains(a)) {
string name;
monmap.get_addr_name(a, name);
- monmap.rename(name, g_conf->name.get_id());
+ monmap.rename(name, g_conf()->name.get_id());
dout(0) << argv[0] << ": renaming mon." << name << " " << a
- << " to mon." << g_conf->name.get_id() << dendl;
+ << " to mon." << g_conf()->name.get_id() << dendl;
}
} else {
// is a local address listed without a name? if so, name myself.
if (name.compare(0, 7, "noname-") == 0) {
dout(0) << argv[0] << ": mon." << name << " " << local
- << " is local, renaming to mon." << g_conf->name.get_id() << dendl;
- monmap.rename(name, g_conf->name.get_id());
+ << " is local, renaming to mon." << g_conf()->name.get_id() << dendl;
+ monmap.rename(name, g_conf()->name.get_id());
} else {
dout(0) << argv[0] << ": mon." << name << " " << local
<< " is local, but not 'noname-' + something; not assuming it's me" << dendl;
}
}
- const auto fsid = g_conf->get_val<uuid_d>("fsid");
+ const auto fsid = g_conf().get_val<uuid_d>("fsid");
if (!fsid.is_zero()) {
monmap.fsid = fsid;
dout(0) << argv[0] << ": set fsid to " << fsid << dendl;
}
// go
- MonitorDBStore store(g_conf->mon_data);
+ MonitorDBStore store(g_conf()->mon_data);
ostringstream oss;
int r = store.create_and_open(oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (r < 0) {
derr << argv[0] << ": error opening mon data directory at '"
- << g_conf->mon_data << "': " << cpp_strerror(r) << dendl;
+ << g_conf()->mon_data << "': " << cpp_strerror(r) << dendl;
exit(1);
}
assert(r == 0);
- Monitor mon(g_ceph_context, g_conf->name.get_id(), &store, 0, 0, &monmap);
+ Monitor mon(g_ceph_context, g_conf()->name.get_id(), &store, 0, 0, &monmap);
r = mon.mkfs(osdmapbl);
if (r < 0) {
derr << argv[0] << ": error creating monfs: " << cpp_strerror(r) << dendl;
exit(1);
}
store.close();
- dout(0) << argv[0] << ": created monfs at " << g_conf->mon_data
- << " for " << g_conf->name << dendl;
+ dout(0) << argv[0] << ": created monfs at " << g_conf()->mon_data
+ << " for " << g_conf()->name << dendl;
return 0;
}
err = check_mon_data_exists();
if (err < 0 && err == -ENOENT) {
- derr << "monitor data directory at '" << g_conf->mon_data << "'"
+ derr << "monitor data directory at '" << g_conf()->mon_data << "'"
<< " does not exist: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0) {
derr << "error accessing monitor data directory at '"
- << g_conf->mon_data << "': " << cpp_strerror(-err) << dendl;
+ << g_conf()->mon_data << "': " << cpp_strerror(-err) << dendl;
exit(1);
}
err = check_mon_data_empty();
if (err == 0) {
- derr << "monitor data directory at '" << g_conf->mon_data
+ derr << "monitor data directory at '" << g_conf()->mon_data
<< "' is empty: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0 && err != -ENOTEMPTY) {
// we don't want an empty data dir by now
- derr << "error accessing '" << g_conf->mon_data << "': "
+ derr << "error accessing '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(1);
}
{
// check fs stats. don't start if it's critically close to full.
ceph_data_stats_t stats;
- int err = get_fs_stats(stats, g_conf->mon_data.c_str());
+ int err = get_fs_stats(stats, g_conf()->mon_data.c_str());
if (err < 0) {
derr << "error checking monitor data's fs stats: " << cpp_strerror(err)
<< dendl;
exit(-err);
}
- if (stats.avail_percent <= g_conf->mon_data_avail_crit) {
+ if (stats.avail_percent <= g_conf()->mon_data_avail_crit) {
derr << "error: monitor data filesystem reached concerning levels of"
<< " available storage space (available: "
<< stats.avail_percent << "% " << byte_u_t(stats.byte_avail)
<< ")\nyou may adjust 'mon data avail crit' to a lower value"
- << " to make this go away (default: " << g_conf->mon_data_avail_crit
+ << " to make this go away (default: " << g_conf()->mon_data_avail_crit
<< "%)\n" << dendl;
exit(ENOSPC);
}
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
- MonitorDBStore *store = new MonitorDBStore(g_conf->mon_data);
+ MonitorDBStore *store = new MonitorDBStore(g_conf()->mon_data);
{
ostringstream oss;
err = store->open(oss);
derr << oss.str() << dendl;
if (err < 0) {
derr << "error opening mon data directory at '"
- << g_conf->mon_data << "': " << cpp_strerror(err) << dendl;
+ << g_conf()->mon_data << "': " << cpp_strerror(err) << dendl;
prefork.exit(1);
}
}
// this is what i will bind to
entity_addr_t ipaddr;
- if (monmap.contains(g_conf->name.get_id())) {
- ipaddr = monmap.get_addr(g_conf->name.get_id());
+ if (monmap.contains(g_conf()->name.get_id())) {
+ ipaddr = monmap.get_addr(g_conf()->name.get_id());
// print helpful warning if the conf file doesn't match
entity_addr_t conf_addr;
std::vector <std::string> my_sections;
- g_conf->get_my_sections(my_sections);
+ g_conf().get_my_sections(my_sections);
std::string mon_addr_str;
- if (g_conf->get_val_from_conf_file(my_sections, "mon addr",
+ if (g_conf().get_val_from_conf_file(my_sections, "mon addr",
mon_addr_str, true) == 0) {
if (conf_addr.parse(mon_addr_str.c_str())) {
if (conf_addr.get_port() == 0)
<< " continuing with monmap configuration" << dendl;
}
} else {
- dout(0) << g_conf->name << " does not exist in monmap, will attempt to join an existing cluster" << dendl;
+ dout(0) << g_conf()->name << " does not exist in monmap, will attempt to join an existing cluster" << dendl;
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
- if (!g_conf->public_addr.is_blank_ip()) {
- ipaddr = g_conf->public_addr;
+ if (!g_conf()->public_addr.is_blank_ip()) {
+ ipaddr = g_conf()->public_addr;
if (ipaddr.get_port() == 0)
ipaddr.set_port(CEPH_MON_PORT_LEGACY);
- dout(0) << "using public_addr " << g_conf->public_addr << " -> "
+ dout(0) << "using public_addr " << g_conf()->public_addr << " -> "
<< ipaddr << dendl;
} else {
MonMap tmpmap;
<< cpp_strerror(err) << dendl;
prefork.exit(1);
}
- if (tmpmap.contains(g_conf->name.get_id())) {
- ipaddr = tmpmap.get_addr(g_conf->name.get_id());
+ if (tmpmap.contains(g_conf()->name.get_id())) {
+ ipaddr = tmpmap.get_addr(g_conf()->name.get_id());
} else {
- derr << "no public_addr or public_network specified, and " << g_conf->name
+ derr << "no public_addr or public_network specified, and " << g_conf()->name
<< " not present in monmap or ceph.conf" << dendl;
prefork.exit(1);
}
}
// bind
- int rank = monmap.get_rank(g_conf->name.get_id());
- std::string public_msgr_type = g_conf->ms_public_type.empty() ? g_conf->get_val<std::string>("ms_type") : g_conf->ms_public_type;
+ int rank = monmap.get_rank(g_conf()->name.get_id());
+ std::string public_msgr_type = g_conf()->ms_public_type.empty() ? g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
Messenger *msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MON(rank), "mon",
0, Messenger::HAS_MANY_CONNECTIONS);
// throttle client traffic
Throttle *client_throttler = new Throttle(g_ceph_context, "mon_client_bytes",
- g_conf->mon_client_bytes);
+ g_conf()->mon_client_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_throttler, NULL);
// NOTE: actual usage on the leader may multiply by the number of
// monitors if they forward large update messages from daemons.
Throttle *daemon_throttler = new Throttle(g_ceph_context, "mon_daemon_bytes",
- g_conf->mon_daemon_bytes);
+ g_conf()->mon_daemon_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_OSD, daemon_throttler,
NULL);
msgr->set_policy_throttlers(entity_name_t::TYPE_MDS, daemon_throttler,
entity_addr_t public_addr = ipaddr;
// check if the public_bind_addr option is set
- if (!g_conf->public_bind_addr.is_blank_ip()) {
- bind_addr = g_conf->public_bind_addr;
+ if (!g_conf()->public_bind_addr.is_blank_ip()) {
+ bind_addr = g_conf()->public_bind_addr;
// set the default port if not already set
if (bind_addr.get_port() == 0) {
}
}
- dout(0) << "starting " << g_conf->name << " rank " << rank
+ dout(0) << "starting " << g_conf()->name << " rank " << rank
<< " at public addr " << public_addr
<< " at bind addr " << bind_addr
- << " mon_data " << g_conf->mon_data
+ << " mon_data " << g_conf()->mon_data
<< " fsid " << monmap.get_fsid()
<< dendl;
prefork.exit(1);
}
- dout(0) << "starting " << g_conf->name << " rank " << rank
+ dout(0) << "starting " << g_conf()->name << " rank " << rank
<< " at " << ipaddr
- << " mon_data " << g_conf->mon_data
+ << " mon_data " << g_conf()->mon_data
<< " fsid " << monmap.get_fsid()
<< dendl;
// start monitor
- mon = new Monitor(g_ceph_context, g_conf->name.get_id(), store,
+ mon = new Monitor(g_ceph_context, g_conf()->name.get_id(), store,
msgr, mgr_msgr, &monmap);
if (force_sync) {
prefork.exit(1);
}
- if (compact || g_conf->mon_compact_on_start) {
+ if (compact || g_conf()->mon_compact_on_start) {
derr << "compacting monitor store ..." << dendl;
mon->store->compact();
derr << "done compacting" << dendl;
}
- if (g_conf->daemonize) {
+ if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
prefork.daemonize();
}
register_async_signal_handler_oneshot(SIGINT, handle_mon_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_mon_signal);
- if (g_conf->inject_early_sigterm)
+ if (g_conf()->inject_early_sigterm)
kill(getpid(), SIGTERM);
msgr->wait();
global_init_chdir(g_ceph_context);
if (get_journal_fsid) {
- device_path = g_conf->get_val<std::string>("osd_journal");
+ device_path = g_conf().get_val<std::string>("osd_journal");
get_device_fsid = true;
}
if (get_device_fsid) {
// whoami
char *end;
- const char *id = g_conf->name.get_id().c_str();
+ const char *id = g_conf()->name.get_id().c_str();
int whoami = strtol(id, &end, 10);
- std::string data_path = g_conf->get_val<std::string>("osd_data");
+ std::string data_path = g_conf().get_val<std::string>("osd_data");
if (*end || end == id || whoami < 0) {
derr << "must specify '-i #' where # is the osd number" << dendl;
forker.exit(1);
}
// the store
- std::string store_type = g_conf->get_val<std::string>("osd_objectstore");
+ std::string store_type = g_conf().get_val<std::string>("osd_objectstore");
{
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/type", data_path.c_str());
}
}
- std::string journal_path = g_conf->get_val<std::string>("osd_journal");
- uint32_t flags = g_conf->get_val<uint64_t>("osd_os_flags");
+ std::string journal_path = g_conf().get_val<std::string>("osd_journal");
+ uint32_t flags = g_conf().get_val<uint64_t>("osd_os_flags");
ObjectStore *store = ObjectStore::create(g_ceph_context,
store_type,
data_path,
forker.exit(1);
}
- EntityName ename(g_conf->name);
+ EntityName ename{g_conf()->name};
EntityAuth eauth;
- std::string keyring_path = g_conf->get_val<std::string>("keyring");
+ std::string keyring_path = g_conf().get_val<std::string>("keyring");
int ret = keyring->load(g_ceph_context, keyring_path);
if (ret == 0 &&
keyring->get_auth(ename, eauth)) {
if (mkfs) {
common_init_finish(g_ceph_context);
- if (g_conf->get_val<uuid_d>("fsid").is_zero()) {
+ if (g_conf().get_val<uuid_d>("fsid").is_zero()) {
derr << "must specify cluster fsid" << dendl;
forker.exit(-EINVAL);
}
int err = OSD::mkfs(g_ceph_context, store, data_path,
- g_conf->get_val<uuid_d>("fsid"),
+ g_conf().get_val<uuid_d>("fsid"),
whoami);
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error creating empty object store in "
}
dout(0) << "created object store " << data_path
<< " for osd." << whoami
- << " fsid " << g_conf->get_val<uuid_d>("fsid")
+ << " fsid " << g_conf().get_val<uuid_d>("fsid")
<< dendl;
}
if (mkfs || mkkey) {
forker.exit(0);
}
- std::string msg_type = g_conf->get_val<std::string>("ms_type");
+ std::string msg_type = g_conf().get_val<std::string>("ms_type");
std::string public_msg_type =
- g_conf->get_val<std::string>("ms_public_type");
+ g_conf().get_val<std::string>("ms_public_type");
std::string cluster_msg_type =
- g_conf->get_val<std::string>("ms_cluster_type");
+ g_conf().get_val<std::string>("ms_cluster_type");
public_msg_type = public_msg_type.empty() ? msg_type : public_msg_type;
cluster_msg_type = cluster_msg_type.empty() ? msg_type : cluster_msg_type;
<< std::endl;
uint64_t message_size =
- g_conf->get_val<uint64_t>("osd_client_message_size_cap");
+ g_conf().get_val<uint64_t>("osd_client_message_size_cap");
boost::scoped_ptr<Throttle> client_byte_throttler(
new Throttle(g_ceph_context, "osd_client_bytes", message_size));
if (ms_cluster->bindv(cluster_addrs) < 0)
forker.exit(1);
- bool is_delay = g_conf->get_val<bool>("osd_heartbeat_use_min_delay_socket");
+ bool is_delay = g_conf().get_val<bool>("osd_heartbeat_use_min_delay_socket");
if (is_delay) {
ms_hb_front_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_back_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
// -- daemonize --
- if (g_conf->daemonize) {
+ if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
forker.daemonize();
}
osd->final_init();
- if (g_conf->get_val<bool>("inject_early_sigterm"))
+ if (g_conf().get_val<bool>("inject_early_sigterm"))
kill(getpid(), SIGTERM);
ms_public->wait();
for (int i=0; i<num_client; i++) {
messengers[i] = Messenger::create_client_messenger(g_ceph_context,
"synclient");
- messengers[i]->bind(g_conf->public_addr);
+ messengers[i]->bind(g_conf()->public_addr);
mclients[i] = new MonClient(g_ceph_context);
mclients[i]->build_initial_monmap();
auto client = new StandaloneClient(messengers[i], mclients[i]);
client_lock.Unlock();
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
AdminSocket* admin_socket = cct->get_admin_socket();
int ret = admin_socket->register_command("mds_requests",
_close_sessions();
client_lock.Unlock();
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
cct->get_admin_socket()->unregister_commands(&m_command_hook);
"failed to remount (to trim kernel dentries): "
"return code = " << r << dendl;
}
- bool should_abort = cct->_conf->get_val<bool>("client_die_on_failed_remount") ||
- cct->_conf->get_val<bool>("client_die_on_failed_dentry_invalidate");
+ bool should_abort = cct->_conf.get_val<bool>("client_die_on_failed_remount") ||
+ cct->_conf.get_val<bool>("client_die_on_failed_dentry_invalidate");
if (should_abort && !unmounting) {
lderr(cct) << "failed to remount for kernel dentry trimming; quitting!" << dendl;
ceph_abort();
{
if (cct->_conf->client_debug_inject_tick_delay > 0) {
sleep(cct->_conf->client_debug_inject_tick_delay);
- assert(0 == cct->_conf->set_val("client_debug_inject_tick_delay", "0"));
- cct->_conf->apply_changes(NULL);
+ assert(0 == cct->_conf.set_val("client_debug_inject_tick_delay", "0"));
+ cct->_conf.apply_changes(NULL);
}
ldout(cct, 21) << "tick" << dendl;
<< ccap_string(in->caps_issued()) << dendl;
}
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
f->readahead.set_trigger_requests(1);
f->readahead.set_min_readahead_size(conf->client_readahead_min);
uint64_t max_readahead = Readahead::NO_LIMIT;
bool movepos = false;
std::unique_ptr<C_SaferCond> onuninline;
int64_t r = 0;
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
Inode *in = f->inode.get();
if ((f->mode & CEPH_FILE_MODE_RD) == 0)
int Client::_read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
Inode *in = f->inode.get();
ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl;
r = _do_remount();
}
if (r) {
- bool should_abort = cct->_conf->get_val<bool>("client_die_on_failed_dentry_invalidate");
+ bool should_abort = cct->_conf.get_val<bool>("client_die_on_failed_dentry_invalidate");
if (should_abort) {
lderr(cct) << "no method to invalidate kernel dentry cache; quitting!" << dendl;
ceph_abort();
return -ENOTCONN;
int r = 0;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
if (strcmp(name, ".") && strcmp(name, "..")) {
return -ENOTCONN;
int r = 0;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
r = may_lookup(parent, perms);
tout(cct) << stx->stx_btime << std::endl;
tout(cct) << mask << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int res = may_setattr(in, stx, mask, perms);
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_READ, perms);
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_WRITE, perms);
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_WRITE, perms);
tout(cct) << mode << std::endl;
tout(cct) << rdev << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perms);
tout(cct) << mode << std::endl;
tout(cct) << rdev << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perms);
tout(cct) << name << std::endl;
tout(cct) << mode << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perm);
tout(cct) << name << std::endl;
tout(cct) << mode << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perms);
tout(cct) << name << std::endl;
tout(cct) << value << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perms);
tout(cct) << name << std::endl;
tout(cct) << value << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_create(parent, perms);
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_delete(in, name, perm);
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_delete(in, name, perms);
tout(cct) << vnewparent.ino.val << std::endl;
tout(cct) << newname << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_delete(parent, name, perm);
InodeRef target;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
if (S_ISDIR(in->mode))
tout(cct) << "ll_opendir" << std::endl;
tout(cct) << vino.ino.val << std::endl;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
int r = may_open(in, flags, perms);
tout(cct) << ceph_flags_sys2wire(flags) << std::endl;
int r;
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
r = may_open(in, flags, perms);
return -EEXIST;
if (r == -ENOENT && (flags & O_CREAT)) {
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
r = may_create(parent, perms);
ldout(cct, 20) << "_ll_create created = " << created << dendl;
if (!created) {
- auto fuse_default_permissions = cct->_conf->get_val<bool>(
+ auto fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
if (!fuse_default_permissions) {
r = may_open(in->get(), flags, perms);
case MetaSession::STATE_OPEN:
{
objecter->maybe_request_map(); /* to check if we are blacklisted */
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
if (conf->client_reconnect_stale) {
ldout(cct, 1) << "reset from mds we were open; close mds session for reconnect" << dendl;
_closed_mds_session(s);
if (m_cct->_conf->heartbeat_inject_failure) {
ldout(m_cct, 0) << "is_healthy injecting failure for next " << m_cct->_conf->heartbeat_inject_failure << " seconds" << dendl;
m_inject_unhealthy_until = now + std::chrono::seconds(m_cct->_conf->heartbeat_inject_failure);
- m_cct->_conf->set_val("heartbeat_inject_failure", "0");
+ m_cct->_conf.set_val("heartbeat_inject_failure", "0");
}
bool healthy = true;
return r;
}
- fsid = cct->_conf->get_val<uuid_d>("fsid");
+ fsid = cct->_conf.get_val<uuid_d>("fsid");
host = cct->_conf->host;
return 0;
}
// std::string fname = cct->_conf->plugin_dir + "/" + type + "/" PLUGIN_PREFIX
// + name + PLUGIN_SUFFIX;
- std::string fname = cct->_conf->get_val<std::string>("plugin_dir") + "/" + type + "/" + PLUGIN_PREFIX
+ std::string fname = cct->_conf.get_val<std::string>("plugin_dir") + "/" + type + "/" + PLUGIN_PREFIX
+ name + PLUGIN_SUFFIX;
void *library = dlopen(fname.c_str(), RTLD_NOW);
if (!library) {
string err1(dlerror());
// fall back to plugin_dir
- std::string fname2 = cct->_conf->get_val<std::string>("plugin_dir") + "/" + PLUGIN_PREFIX +
+ std::string fname2 = cct->_conf.get_val<std::string>("plugin_dir") + "/" + PLUGIN_PREFIX +
name + PLUGIN_SUFFIX;
library = dlopen(fname2.c_str(), RTLD_NOW);
if (!library) {
const char *config_key)
: m_cct(cct), m_library(library), m_config_keys{config_key, NULL},
m_lock("TracepointProvider::m_lock") {
- m_cct->_conf->add_observer(this);
+ m_cct->_conf.add_observer(this);
verify_config(m_cct->_conf);
}
TracepointProvider::~TracepointProvider() {
- m_cct->_conf->remove_observer(this);
+ m_cct->_conf.remove_observer(this);
if (m_handle) {
dlclose(m_handle);
}
void TracepointProvider::handle_conf_change(
const md_config_t *conf, const std::set<std::string> &changed) {
if (changed.count(m_config_keys[0])) {
- verify_config(conf);
+ verify_config(m_cct->_conf);
}
}
-void TracepointProvider::verify_config(const md_config_t *conf) {
+void TracepointProvider::verify_config(const ConfigProxy& conf) {
Mutex::Locker locker(m_lock);
if (m_handle) {
return;
char buf[10];
char *pbuf = buf;
- if (conf->get_val(m_config_keys[0], &pbuf, sizeof(buf)) != 0 ||
+ if (conf.get_val(m_config_keys[0], &pbuf, sizeof(buf)) != 0 ||
strncmp(buf, "true", 5) != 0) {
return;
}
Mutex m_lock;
void* m_handle = nullptr;
- void verify_config(const md_config_t *conf);
+ void verify_config(const ConfigProxy& conf);
};
#endif // CEPH_TRACEPOINT_PROVIDER_H
if (_thread_num_option.length()) {
ldout(cct, 10) << " registering config observer on " << _thread_num_option << dendl;
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
}
_lock.Lock();
if (_thread_num_option.length()) {
ldout(cct, 10) << " unregistering config observer on " << _thread_num_option << dendl;
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
}
_lock.Lock();
void handle_conf_change(const md_config_t *conf,
const std::set <std::string> &changed) override {
- if (conf->lockdep && !m_registered) {
+ if (conf->values.lockdep && !m_registered) {
lockdep_register_ceph_context(m_cct);
m_registered = true;
- } else if (!conf->lockdep && m_registered) {
+ } else if (!conf->values.lockdep && m_registered) {
lockdep_unregister_ceph_context(m_cct);
m_registered = false;
}
public:
explicit MempoolObs(CephContext *cct) : cct(cct) {
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
int r = cct->get_admin_socket()->register_command(
"dump_mempools",
"dump_mempools",
assert(r == 0);
}
~MempoolObs() override {
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
cct->get_admin_socket()->unregister_command("dump_mempools");
}
return KEYS;
}
- void handle_conf_change(const md_config_t *conf,
+ void handle_conf_change(const md_config_t *mconf,
const std::set <std::string> &changed) override {
+ ConfigReader conf{mconf};
// stderr
if (changed.count("log_to_stderr") || changed.count("err_to_stderr")) {
int l = conf->log_to_stderr ? 99 : (conf->err_to_stderr ? -1 : -2);
}
if (changed.count("log_stderr_prefix")) {
- log->set_log_stderr_prefix(conf->get_val<string>("log_stderr_prefix"));
+ log->set_log_stderr_prefix(conf.get_val<string>("log_stderr_prefix"));
}
if (changed.count("log_max_new")) {
}
if (changed.find("log_coarse_timestamps") != changed.end()) {
- log->set_coarse_timestamps(conf->get_val<bool>("log_coarse_timestamps"));
+ log->set_coarse_timestamps(conf.get_val<bool>("log_coarse_timestamps"));
}
// metadata
}
if (log->graylog() && changed.count("fsid")) {
- log->graylog()->set_fsid(conf->get_val<uuid_d>("fsid"));
+ log->graylog()->set_fsid(conf.get_val<uuid_d>("fsid"));
}
}
};
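For orientation, a minimal sketch of the observer pattern used by LogObs above: handle_conf_change() still receives the raw md_config_t pointer, and wrapping it in a ConfigReader restores the same operator-> access to the legacy ConfigValues members that ConfigProxy gives the rest of the code. The class name, tracked key, and headers below are illustrative assumptions, not part of the patch.

#include <set>
#include <string>
#include "common/config_obs.h"
#include "common/config_proxy.h"

class ExampleObs : public md_config_obs_t {
  const char **get_tracked_conf_keys() const override {
    static const char *KEYS[] = {"log_to_stderr", nullptr};
    return KEYS;
  }
  void handle_conf_change(const md_config_t *mconf,
                          const std::set<std::string> &changed) override {
    ConfigReader conf{mconf};               // read-only view over the raw pointer
    if (changed.count("log_to_stderr")) {
      bool to_stderr = conf->log_to_stderr; // legacy member, via ConfigValues
      (void)to_stderr;
    }
  }
};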
return KEYS;
}
- void handle_conf_change(const md_config_t *conf,
+ void handle_conf_change(const md_config_t *mconf,
const std::set <std::string> &changed) override {
if (changed.count(
"enable_experimental_unrecoverable_data_corrupting_features")) {
+ ConfigReader conf{mconf};
std::lock_guard<ceph::spinlock> lg(cct->_feature_lock);
get_str_set(
conf->enable_experimental_unrecoverable_data_corrupting_features,
boost::replace_all(section, " ", "_");
f->open_object_section(section.c_str());
if (command == "config show") {
- _conf->show_config(f);
+ _conf.show_config(f);
}
else if (command == "config unset") {
std::string var;
if (!(cmd_getval(this, cmdmap, "var", var))) {
f->dump_string("error", "syntax error: 'config unset <var>'");
} else {
- int r = _conf->rm_val(var.c_str());
+ int r = _conf.rm_val(var.c_str());
if (r < 0 && r != -ENOENT) {
f->dump_stream("error") << "error unsetting '" << var << "': "
<< cpp_strerror(r);
} else {
ostringstream ss;
- _conf->apply_changes(&ss);
+ _conf.apply_changes(&ss);
f->dump_string("success", ss.str());
}
}
} else {
// val may be multiple words
string valstr = str_join(val, " ");
- int r = _conf->set_val(var.c_str(), valstr.c_str());
+ int r = _conf.set_val(var.c_str(), valstr.c_str());
if (r < 0) {
f->dump_stream("error") << "error setting '" << var << "' to '" << valstr << "': " << cpp_strerror(r);
} else {
ostringstream ss;
- _conf->apply_changes(&ss);
+ _conf.apply_changes(&ss);
f->dump_string("success", ss.str());
}
}
char buf[4096];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- int r = _conf->get_val(var.c_str(), &tmp, sizeof(buf));
+ int r = _conf.get_val(var.c_str(), &tmp, sizeof(buf));
if (r < 0) {
f->dump_stream("error") << "error getting '" << var << "': " << cpp_strerror(r);
} else {
if (cmd_getval(this, cmdmap, "var", var)) {
// Output a single one
std::string key = ConfFile::normalize_key_name(var);
- const auto &i = _conf->schema.find(key);
- if (i == _conf->schema.end()) {
+ auto schema = _conf.get_schema(key);
+ if (!schema) {
std::ostringstream msg;
msg << "Setting not found: '" << key << "'";
f->dump_string("error", msg.str());
} else {
- f->dump_object("option", i->second);
+ f->dump_object("option", *schema);
}
} else {
// Output all
}
} else if (command == "config diff") {
f->open_object_section("diff");
- _conf->diff(f);
+ _conf.diff(f);
f->close_section(); // unknown
} else if (command == "config diff get") {
std::string setting;
f->open_object_section("diff");
- _conf->diff(f, setting);
+ _conf.diff(f, setting);
f->close_section(); // unknown
} else if (command == "log flush") {
_log->flush();
enum code_environment_t code_env,
int init_flags_)
: nref(1),
- _conf(new md_config_t(code_env == CODE_ENVIRONMENT_DAEMON)),
+ _conf{code_env == CODE_ENVIRONMENT_DAEMON},
_log(NULL),
_module_type(module_type_),
_init_flags(init_flags_),
_log->start();
_log_obs = new LogObs(_log);
- _conf->add_observer(_log_obs);
+ _conf.add_observer(_log_obs);
_cct_obs = new CephContextObs(this);
- _conf->add_observer(_cct_obs);
+ _conf.add_observer(_cct_obs);
_lockdep_obs = new LockdepObs(this);
- _conf->add_observer(_lockdep_obs);
+ _conf.add_observer(_lockdep_obs);
_perf_counters_collection = new PerfCountersCollection(this);
delete _perf_counters_conf_obs;
_perf_counters_conf_obs = NULL;
- _conf->remove_observer(_log_obs);
+ _conf.remove_observer(_log_obs);
delete _log_obs;
_log_obs = NULL;
- _conf->remove_observer(_cct_obs);
+ _conf.remove_observer(_cct_obs);
delete _cct_obs;
_cct_obs = NULL;
- _conf->remove_observer(_lockdep_obs);
+ _conf.remove_observer(_lockdep_obs);
delete _lockdep_obs;
_lockdep_obs = NULL;
delete _log;
_log = NULL;
- delete _conf;
-
delete _crypto_none;
delete _crypto_aes;
if (_crypto_inited > 0) {
// Trigger callbacks on any config observers that were waiting for
// it to become safe to start threads.
- _conf->set_safe_to_start_threads();
- _conf->call_all_observers();
+ _conf.set_safe_to_start_threads();
+ _conf.call_all_observers();
// start admin socket
if (_conf->admin_socket.length())
#include "common/cmdparse.h"
#include "common/code_environment.h"
-#include "common/config_fwd.h"
+#include "common/config_proxy.h"
#include "include/spinlock.h"
}
void put();
- md_config_t *_conf;
+ ConfigProxy _conf;
ceph::logging::Log *_log;
/* init ceph::crypto */
// Create a configuration object
CephContext *cct = new CephContext(iparams.module_type, code_env, flags);
- md_config_t *conf = cct->_conf;
+ auto& conf = cct->_conf;
// add config observers here
// Set up our entity name.
// for backward compatibility. moving forward, we want all keyrings
// in these locations. the mon already forces $mon_data/keyring.
if (conf->name.is_mds()) {
- conf->set_val_default("keyring", "$mds_data/keyring");
+ conf.set_val_default("keyring", "$mds_data/keyring");
} else if (conf->name.is_osd()) {
- conf->set_val_default("keyring", "$osd_data/keyring");
+ conf.set_val_default("keyring", "$osd_data/keyring");
}
if ((flags & CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS)) {
// make this unique despite multiple instances by the same name.
- conf->set_val_default("admin_socket",
+ conf.set_val_default("admin_socket",
"$run_dir/$cluster-$name.$pid.$cctid.asok");
}
if (code_env == CODE_ENVIRONMENT_LIBRARY ||
code_env == CODE_ENVIRONMENT_UTILITY_NODOUT) {
- conf->set_val_default("log_to_stderr", "false");
- conf->set_val_default("err_to_stderr", "false");
- conf->set_val_default("log_flush_on_exit", "false");
+ conf.set_val_default("log_to_stderr", "false");
+ conf.set_val_default("err_to_stderr", "false");
+ conf.set_val_default("log_flush_on_exit", "false");
}
if (code_env != CODE_ENVIRONMENT_DAEMON) {
// NOTE: disable ms subsystem gathering in clients by default
- conf->set_val_default("debug_ms", "0/0");
+ conf.set_val_default("debug_ms", "0/0");
}
return cct;
cct->get_admin_socket()->chown(cct->get_set_uid(), cct->get_set_gid());
}
- md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
if (!conf->admin_socket.empty() && !conf->admin_socket_mode.empty()) {
int ret = 0;
template<LockPolicy lp>
md_config_impl<lp>::md_config_impl(bool is_daemon)
: is_daemon(is_daemon),
- cluster(""),
lock("md_config_t", true, false)
{
// Load the compile-time list of Option into
}
// Define the debug_* options as well.
- subsys_options.reserve(subsys.get_num());
- for (unsigned i = 0; i < subsys.get_num(); ++i) {
- string name = string("debug_") + subsys.get_name(i);
+ subsys_options.reserve(values.subsys.get_num());
+ for (unsigned i = 0; i < values.subsys.get_num(); ++i) {
+ string name = string("debug_") + values.subsys.get_name(i);
subsys_options.push_back(
Option(name, Option::TYPE_STR, Option::LEVEL_ADVANCED));
Option& opt = subsys_options.back();
- opt.set_default(stringify(subsys.get_log_level(i)) + "/" +
- stringify(subsys.get_gather_level(i)));
- string desc = string("Debug level for ") + subsys.get_name(i);
+ opt.set_default(stringify(values.subsys.get_log_level(i)) + "/" +
+ stringify(values.subsys.get_gather_level(i)));
+ string desc = string("Debug level for ") + values.subsys.get_name(i);
opt.set_description(desc.c_str());
opt.set_flag(Option::FLAG_RUNTIME);
opt.set_long_description("The value takes the form 'N' or 'N/M' where N and M are values between 0 and 99. N is the debug level to log (all values below this are included), and M is the level to gather and buffer in memory. In the event of a crash, the most recent items <= M are dumped to the log file.");
// members if present.
legacy_values = {
#define OPTION(name, type) \
- {std::string(STRINGIFY(name)), &md_config_impl::name},
+ {std::string(STRINGIFY(name)), &ConfigValues::name},
#define SAFE_OPTION(name, type) OPTION(name, type)
#include "common/legacy_config_opts.h"
#undef OPTION
ceph_abort();
}
}
- for (const auto& [name,configs] : values) {
- auto j = configs.find(CONF_MON);
- if (j != configs.end()) {
- if (kv.find(name) == kv.end()) {
- ldout(cct,10) << __func__ << " " << name
- << " cleared (was " << Option::to_str(j->second) << ")"
- << dendl;
- _rm_val(name, CONF_MON);
- }
+ values.for_each([&] (auto& name, auto& configs) {
+ auto config = configs.find(CONF_MON);
+ if (config == configs.end()) {
+ return;
}
- }
+ if (kv.find(name) != kv.end()) {
+ return;
+ }
+ ldout(cct,10) << __func__ << " " << name
+ << " cleared (was " << Option::to_str(config->second) << ")"
+ << dendl;
+ values.rm_val(name, CONF_MON);
+ });
values_bl.clear();
_apply_changes(nullptr);
return 0;
if (safe_to_start_threads)
return -ENOSYS;
- if (!cluster.size() && !conf_files_str) {
+ if (!values.cluster.size() && !conf_files_str) {
/*
* set the cluster name to 'ceph' when neither cluster name nor
* configuration file are specified.
*/
- cluster = "ceph";
+ values.cluster = "ceph";
}
if (!conf_files_str) {
if (c == conf_files.end())
return -ENOENT;
- if (cluster.size() == 0) {
+ if (values.cluster.size() == 0) {
/*
* If cluster name is not set yet, use the prefix of the
* basename of configuration file as cluster name.
* convention, we do the last try and assign the cluster to
* 'ceph'.
*/
- cluster = "ceph";
+ values.cluster = "ceph";
} else {
- cluster = c->substr(start, end - start);
+ values.cluster = c->substr(start, end - start);
}
}
}
if (getenv("CEPH_KEYRING")) {
auto locker = lock();
- string k = getenv("CEPH_KEYRING");
- values["keyring"][CONF_ENV] = Option::value_t(k);
+ _set_val(getenv("CEPH_KEYRING"), *find_option("keyring"),
+ CONF_ENV, nullptr);
}
if (const char *dir = getenv("CEPH_LIB")) {
auto locker = lock();
void md_config_impl<lp>::_show_config(std::ostream *out, Formatter *f)
{
if (out) {
- *out << "name = " << name << std::endl;
- *out << "cluster = " << cluster << std::endl;
+ *out << "name = " << values.name << std::endl;
+ *out << "cluster = " << values.cluster << std::endl;
}
if (f) {
- f->dump_string("name", stringify(name));
- f->dump_string("cluster", cluster);
+ f->dump_string("name", stringify(values.name));
+ f->dump_string("cluster", values.cluster);
}
for (const auto& i: schema) {
const Option &opt = i.second;
do_show_config_value = val;
}
else if (ceph_argparse_flag(args, i, "--no-mon-config", (char*)NULL)) {
- no_mon_config = true;
+ values.no_mon_config = true;
}
else if (ceph_argparse_flag(args, i, "--mon-config", (char*)NULL)) {
- no_mon_config = false;
+ values.no_mon_config = false;
}
else if (ceph_argparse_flag(args, i, "--foreground", "-f", (char*)NULL)) {
set_val_or_die("daemonize", "false");
/*
* apply changes until the cluster name is assigned
*/
- if (cluster.size())
+ if (values.cluster.size())
_apply_changes(oss);
}
for (const auto &i : schema) {
++n;
encode(i.second.name, bl);
- auto j = values.find(i.second.name);
- if (j != values.end()) {
- auto k = j->second.find(CONF_DEFAULT);
- if (k != j->second.end()) {
- encode(Option::to_str(k->second), bl);
- continue;
- }
+ auto [value, found] = values.get_value(i.second.name, CONF_DEFAULT);
+ if (found) {
+ encode(Option::to_str(value), bl);
+ } else {
+ string val;
+ conf_stringify(_get_val_default(i.second), &val);
+ encode(val, bl);
}
- string val;
- conf_stringify(_get_val_default(i.second), &val);
- encode(val, bl);
}
encode(n, defaults_bl);
defaults_bl.claim_append(bl);
if (values_bl.length() == 0) {
uint32_t n = 0;
bufferlist bl;
- for (auto& i : values) {
- if (i.first == "fsid" ||
- i.first == "host") {
- continue;
+ values.for_each([&](auto& name, auto& configs) {
+ if (name == "fsid" ||
+ name == "host") {
+ return;
}
++n;
- encode(i.first, bl);
- encode((uint32_t)i.second.size(), bl);
- for (auto& j : i.second) {
+ encode(name, bl);
+ encode((uint32_t)configs.size(), bl);
+ for (auto& j : configs) {
encode(j.first, bl);
encode(Option::to_str(j.second), bl);
}
- }
+ });
// make sure overridden items appear, and include the default value
for (auto& i : ignored_mon_values) {
- if (values.count(i.first)) {
+ if (values.contains(i.first)) {
continue;
}
if (i.first == "fsid" ||
if (!stack) {
stack = &a_stack;
}
-
- auto p = values.find(o.name);
- if (p != values.end() && !p->second.empty()) {
- // use highest-priority value available (see CONF_*)
- return _expand_meta(p->second.rbegin()->second, &o, stack, err);
- }
-
- return _expand_meta(_get_val_default(o), &o, stack, err);
+ return _expand_meta(_get_val_nometa(o), &o, stack, err);
}
template<LockPolicy lp>
Option::value_t md_config_impl<lp>::_get_val_nometa(const Option& o) const
{
- auto p = values.find(o.name);
- if (p != values.end() && !p->second.empty()) {
- // use highest-priority value available (see CONF_*)
- return p->second.rbegin()->second;
+ if (auto [value, found] = values.get_value(o.name, -1); found) {
+ return value;
+ } else {
+ return _get_val_default(o);
}
- return _get_val_default(o);
}
template<LockPolicy lp>
//cout << " found var " << var << std::endl;
// special metavariable?
if (var == "type") {
- out += name.get_type_name();
+ out += values.name.get_type_name();
} else if (var == "cluster") {
- out += cluster;
+ out += values.cluster;
} else if (var == "name") {
- out += name.to_cstr();
+ out += values.name.to_cstr();
} else if (var == "host") {
- if (host == "") {
+ if (values.host == "") {
out += ceph_get_short_hostname();
} else {
- out += host;
+ out += values.host;
}
} else if (var == "num") {
- out += name.get_id().c_str();
+ out += values.name.get_id().c_str();
} else if (var == "id") {
- out += name.get_id();
+ out += values.name.get_id();
} else if (var == "pid") {
out += stringify(getpid());
if (o) {
void md_config_impl<lp>::_get_my_sections(std::vector <std::string> §ions) const
{
assert(lock.is_locked());
- sections.push_back(name.to_str());
+ sections.push_back(values.name.to_str());
- sections.push_back(name.get_type_name());
+ sections.push_back(values.name.get_type_name());
sections.push_back("global");
}
}
// Apply the value to its entry in the `values` map
- auto p = values.find(opt.name);
- if (p != values.end()) {
- auto q = p->second.find(level);
- if (q != p->second.end()) {
- if (new_value == q->second) {
- // no change!
- return 0;
- }
- q->second = new_value;
- } else {
- p->second[level] = new_value;
- }
+ auto result = values.set_value(opt.name, std::move(new_value), level);
+ switch (result) {
+ case ConfigValues::SET_NO_CHANGE:
+ return 0;
+ case ConfigValues::SET_NO_EFFECT:
values_bl.clear();
- if (p->second.rbegin()->first > level) {
- // there was a higher priority value; no effect
- return 0;
- }
- } else {
+ return 0;
+ case ConfigValues::SET_HAVE_EFFECT:
+ // fallthrough
+ default:
values_bl.clear();
- values[opt.name][level] = new_value;
+ _refresh(opt);
+ return 1;
}
-
- _refresh(opt);
- return 1;
}
template<LockPolicy lp>
if (legacy_ptr_iter != legacy_values.end()) {
update_legacy_val(opt, legacy_ptr_iter->second);
}
-
// Was this a debug_* option update?
if (opt.subsys >= 0) {
string actual_val;
conf_stringify(_get_val(opt), &actual_val);
- int log, gather;
- int r = sscanf(actual_val.c_str(), "%d/%d", &log, &gather);
- if (r >= 1) {
- if (r < 2) {
- gather = log;
- }
- subsys.set_log_level(opt.subsys, log);
- subsys.set_gather_level(opt.subsys, gather);
- }
+ values.set_logging(opt.subsys, actual_val.c_str());
} else {
// normal option, advertise the change.
changed.insert(opt.name);
if (schema.count(key) == 0) {
return -EINVAL;
}
- auto i = values.find(key);
- if (i == values.end()) {
- return -ENOENT;
- }
- auto j = i->second.find(level);
- if (j == i->second.end()) {
- return -ENOENT;
+ auto ret = values.rm_val(key, level);
+ if (ret < 0) {
+ return ret;
}
- bool matters = (j->first == i->second.rbegin()->first);
- i->second.erase(j);
- if (matters) {
+ if (ret == ConfigValues::SET_HAVE_EFFECT) {
_refresh(*find_option(key));
}
values_bl.clear();
md_config_impl::member_ptr_t member_ptr)
{
Option::value_t v = _get_val(opt);
- boost::apply_visitor(assign_visitor(this, v), member_ptr);
+ boost::apply_visitor(assign_visitor(&values, v), member_ptr);
}
static void dump(Formatter *f, int level, Option::value_t in)
string name) const
{
auto locker = lock();
- for (auto& i : values) {
- if (i.second.size() == 1 &&
- i.second.begin()->first == CONF_DEFAULT) {
+ values.for_each([this, f] (auto& name, auto& configs) {
+ if (configs.size() == 1 &&
+ configs.begin()->first == CONF_DEFAULT) {
// we only have a default value; exclude from diff
- continue;
+ return;
}
- f->open_object_section(i.first.c_str());
- const Option *o = find_option(i.first);
+ f->open_object_section(name.c_str());
+ const Option *o = find_option(name);
dump(f, CONF_DEFAULT, _get_val_default(*o));
- for (auto& j : i.second) {
+ for (auto& j : configs) {
dump(f, j.first, j.second);
}
dump(f, CONF_FINAL, _get_val(*o));
f->close_section();
- }
+ });
}
template<LockPolicy lp>
#include <map>
#include <boost/container/small_vector.hpp>
#include "common/ConfUtils.h"
-#include "common/entity_name.h"
#include "common/code_environment.h"
#include "common/Mutex.h"
#include "log/SubsystemMap.h"
#include "common/options.h"
#include "common/subsys_types.h"
#include "common/config_fwd.h"
+#include "common/config_values.h"
#include "common/lock_mutex.h"
class CephContext;
template<LockPolicy lock_policy>
struct md_config_impl {
public:
- typedef boost::variant<int64_t md_config_t::*,
- uint64_t md_config_t::*,
- std::string md_config_t::*,
- double md_config_t::*,
- bool md_config_t::*,
- entity_addr_t md_config_t::*,
- entity_addrvec_t md_config_t::*,
- uuid_d md_config_t::*> member_ptr_t;
+ typedef boost::variant<int64_t ConfigValues::*,
+ uint64_t ConfigValues::*,
+ std::string ConfigValues::*,
+ double ConfigValues::*,
+ bool ConfigValues::*,
+ entity_addr_t ConfigValues::*,
+ entity_addrvec_t ConfigValues::*,
+ uuid_d ConfigValues::*> member_ptr_t;
// For use when intercepting configuration updates
typedef std::function<bool(
/*
* Mapping from legacy config option names to class members
*/
- std::map<std::string, md_config_impl::member_ptr_t> legacy_values;
+ std::map<std::string, member_ptr_t> legacy_values;
/**
* The configuration schema, in the form of Option objects describing
/**
* The current values of all settings described by the schema
*/
- std::map<std::string, map<int32_t,Option::value_t>> values;
+ ConfigValues values;
/// values from mon that we failed to set
std::map<std::string,std::string> ignored_mon_values;
void update_legacy_vals();
- void update_legacy_val(const Option &opt,
- md_config_impl::member_ptr_t member);
+ void update_legacy_val(const Option &opt, member_ptr_t member);
Option::value_t _expand_meta(
const Option::value_t& in,
vector<Option> subsys_options;
public:
- ceph::logging::SubsystemMap subsys;
-
- EntityName name;
string data_dir_option; ///< data_dir config option, if any
- /// cluster name
- string cluster;
-
- bool no_mon_config = false;
-
-// This macro block defines C members of the md_config_t struct
-// corresponding to the definitions in legacy_config_opts.h.
-// These C members are consumed by code that was written before
-// the new options.cc infrastructure: all newer code should
-// be consume options via explicit get() rather than C members.
-#define OPTION_OPT_INT(name) int64_t name;
-#define OPTION_OPT_LONGLONG(name) int64_t name;
-#define OPTION_OPT_STR(name) std::string name;
-#define OPTION_OPT_DOUBLE(name) double name;
-#define OPTION_OPT_FLOAT(name) double name;
-#define OPTION_OPT_BOOL(name) bool name;
-#define OPTION_OPT_ADDR(name) entity_addr_t name;
-#define OPTION_OPT_ADDRVEC(name) entity_addrvec_t name;
-#define OPTION_OPT_U32(name) uint64_t name;
-#define OPTION_OPT_U64(name) uint64_t name;
-#define OPTION_OPT_UUID(name) uuid_d name;
-#define OPTION(name, ty) \
- public: \
- OPTION_##ty(name)
-#define SAFE_OPTION(name, ty) \
- protected: \
- OPTION_##ty(name)
-#include "common/legacy_config_opts.h"
-#undef OPTION_OPT_INT
-#undef OPTION_OPT_LONGLONG
-#undef OPTION_OPT_STR
-#undef OPTION_OPT_DOUBLE
-#undef OPTION_OPT_FLOAT
-#undef OPTION_OPT_BOOL
-#undef OPTION_OPT_ADDR
-#undef OPTION_OPT_ADDRVEC
-#undef OPTION_OPT_U32
-#undef OPTION_OPT_U64
-#undef OPTION_OPT_UUID
-#undef OPTION
-#undef SAFE_OPTION
-
public:
unsigned get_osd_pool_default_min_size() const {
auto min_size = get_val<uint64_t>("osd_pool_default_min_size");
template <typename ValueT>
class md_config_cacher_t : public md_config_obs_t {
- md_config_t& conf;
+ ConfigProxy& conf;
const char* const option_name;
std::atomic<ValueT> value_cache;
}
public:
- md_config_cacher_t(md_config_t& conf,
+ md_config_cacher_t(ConfigProxy& conf,
const char* const option_name)
: conf(conf),
option_name(option_name) {
ceph::internal::md_config_impl<ceph::internal::LockPolicy::MUTEX>;
using md_config_obs_t =
ceph::internal::md_config_obs_impl<ceph::internal::LockPolicy::MUTEX>;
+class ConfigProxy;
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+
+#pragma once
+
+#include <memory>
+#include <type_traits>
+
+#include "common/config.h"
+#include "common/config_fwd.h"
+
+template<bool is_const>
+class ConfigProxyBase {
+protected:
+ using config_t = std::conditional_t<is_const,
+ const md_config_t,
+ md_config_t>;
+ config_t* config;
+ ConfigProxyBase(config_t* c)
+ : config{c}
+ {}
+public:
+ const ConfigValues* operator->() const noexcept {
+ return &config->values;
+ }
+ int get_val(const std::string& key, char** buf, int len) const {
+ return config->get_val(key, buf, len);
+ }
+ int get_val(const std::string &key, std::string *val) const {
+ return config->get_val(key, val);
+ }
+ template<typename T>
+ const T get_val(const std::string& key) const {
+ return config->template get_val<T>(key);
+ }
+ template<typename T, typename Callback, typename...Args>
+ auto with_val(const string& key, Callback&& cb, Args&&... args) const {
+ return config->template with_val<T>(key, std::forward<Callback>(cb),
+ std::forward<Args>(args)...);
+ }
+ const Option* get_schema(const std::string& key) const {
+ auto found = config->schema.find(key);
+ if (found == config->schema.end()) {
+ return nullptr;
+ } else {
+ return &found->second;
+ }
+ }
+ const Option *find_option(const string& name) const {
+ return config->find_option(name);
+ }
+ void diff(Formatter *f, const std::string& name=string{}) const {
+ return config->diff(f, name);
+ }
+ void get_my_sections(std::vector <std::string> §ions) const {
+ config->get_my_sections(sections);
+ }
+ int get_all_sections(std::vector<std::string>& sections) const {
+ return config->get_all_sections(sections);
+ }
+ int get_val_from_conf_file(const std::vector<std::string>& sections,
+ const std::string& key, std::string& out,
+ bool emeta) const {
+ return config->get_val_from_conf_file(sections, key, out, emeta);
+ }
+ unsigned get_osd_pool_default_min_size() const {
+ return config->get_osd_pool_default_min_size();
+ }
+ void early_expand_meta(std::string &val,
+ std::ostream *oss) const {
+ return config->early_expand_meta(val, oss);
+ }
+};
+
+class ConfigReader final : public ConfigProxyBase<true> {
+public:
+ explicit ConfigReader(const md_config_t* config)
+ : ConfigProxyBase<true>{config}
+ {}
+};
+
+class ConfigProxy final : public ConfigProxyBase<false> {
+ std::unique_ptr<md_config_t> conf;
+public:
+ explicit ConfigProxy(bool is_daemon)
+ : ConfigProxyBase{nullptr},
+ conf{std::make_unique<md_config_t>(is_daemon)}
+ {
+ config = conf.get();
+ }
+ void add_observer(md_config_obs_t* obs) {
+ config->add_observer(obs);
+ }
+ void remove_observer(md_config_obs_t* obs) {
+ config->remove_observer(obs);
+ }
+ void set_safe_to_start_threads() {
+ config->set_safe_to_start_threads();
+ }
+ void _clear_safe_to_start_threads() {
+ config->_clear_safe_to_start_threads();
+ }
+ void call_all_observers() {
+ config->call_all_observers();
+ }
+ void show_config(std::ostream& out) {
+ config->show_config(out);
+ }
+ void show_config(Formatter *f) {
+ config->show_config(f);
+ }
+ void config_options(Formatter *f) {
+ config->config_options(f);
+ }
+ int rm_val(const std::string& key) {
+ return config->rm_val(key);
+ }
+ void apply_changes(std::ostream* oss) {
+ config->apply_changes(oss);
+ }
+ int set_val(const std::string& key, const string& s,
+ std::stringstream* err_ss=nullptr) {
+ return config->set_val(key, s, err_ss);
+ }
+ void set_val_default(const std::string& key, const std::string& val) {
+ config->set_val_default(key, val);
+ }
+ void set_val_or_die(const std::string& key, const std::string& val) {
+ config->set_val_or_die(key, val);
+ }
+ int set_mon_vals(CephContext *cct,
+ const map<std::string,std::string>& kv,
+ md_config_t::config_callback config_cb) {
+ return config->set_mon_vals(cct, kv, config_cb);
+ }
+ int injectargs(const std::string &s, std::ostream *oss) {
+ return config->injectargs(s, oss);
+ }
+ void parse_env(const char *env_var = "CEPH_ARGS") {
+ config->parse_env(env_var);
+ }
+ int parse_argv(std::vector<const char*>& args, int level=CONF_CMDLINE) {
+ return config->parse_argv(args, level);
+ }
+ int parse_config_files(const char *conf_files,
+ std::ostream *warnings, int flags) {
+ return config->parse_config_files(conf_files, warnings, flags);
+ }
+ void complain_about_parse_errors(CephContext *cct) {
+ return config->complain_about_parse_errors(cct);
+ }
+ void do_argv_commands() {
+ config->do_argv_commands();
+ }
+ void get_config_bl(uint64_t have_version,
+ bufferlist *bl,
+ uint64_t *got_version) {
+ config->get_config_bl(have_version, bl, got_version);
+ }
+ void get_defaults_bl(bufferlist *bl) {
+ config->get_defaults_bl(bl);
+ }
+};
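A minimal usage sketch of the new proxy, assuming a valid CephContext* as in the hunks above (the function name and option values are illustrative only): the arrow operator keeps legacy member reads working, while the dot calls forward to md_config_t.

#include <string>
#include "common/ceph_context.h"
#include "common/config_proxy.h"

void config_proxy_example(CephContext *cct)
{
  auto& conf = cct->_conf;                   // ConfigProxy, held by value in CephContext
  std::string dir = conf->mon_data;          // operator-> exposes legacy ConfigValues members
  auto fsid = conf.get_val<uuid_d>("fsid");  // schema-driven lookup through md_config_t
  conf.set_val("debug_ms", "1/5");           // mutators exist only on ConfigProxy, not ConfigReader
  conf.apply_changes(nullptr);
  (void)dir;
  (void)fsid;
}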
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+#include "config_values.h"
+
+#include "config.h"
+
+ConfigValues::set_value_result_t
+ConfigValues::set_value(const std::string& key,
+ Option::value_t&& new_value,
+ int level)
+{
+ if (auto p = values.find(key); p != values.end()) {
+ auto q = p->second.find(level);
+ if (q != p->second.end()) {
+ if (new_value == q->second) {
+ return SET_NO_CHANGE;
+ }
+ q->second = std::move(new_value);
+ } else {
+ p->second[level] = std::move(new_value);
+ }
+ if (p->second.rbegin()->first > level) {
+ // there was a higher priority value; no effect
+ return SET_NO_EFFECT;
+ } else {
+ return SET_HAVE_EFFECT;
+ }
+ } else {
+ values[key][level] = std::move(new_value);
+ return SET_HAVE_EFFECT;
+ }
+}
+
+int ConfigValues::rm_val(const std::string& key, int level)
+{
+ auto i = values.find(key);
+ if (i == values.end()) {
+ return -ENOENT;
+ }
+ auto j = i->second.find(level);
+ if (j == i->second.end()) {
+ return -ENOENT;
+ }
+ bool matters = (j->first == i->second.rbegin()->first);
+ i->second.erase(j);
+ if (matters) {
+ return SET_HAVE_EFFECT;
+ } else {
+ return SET_NO_EFFECT;
+ }
+}
+
+std::pair<Option::value_t, bool>
+ConfigValues::get_value(const std::string& name, int level) const
+{
+ auto p = values.find(name);
+ if (p != values.end() && !p->second.empty()) {
+ // use highest-priority value available (see CONF_*)
+ if (level < 0) {
+ return {p->second.rbegin()->second, true};
+ } else if (auto found = p->second.find(level);
+ found != p->second.end()) {
+ return {found->second, true};
+ }
+ }
+ return {Option::value_t{}, false};
+}
+
+void ConfigValues::set_logging(int which, const char* val)
+{
+ int log, gather;
+ int r = sscanf(val, "%d/%d", &log, &gather);
+ if (r >= 1) {
+ if (r < 2) {
+ gather = log;
+ }
+ subsys.set_log_level(which, log);
+ subsys.set_gather_level(which, gather);
+ }
+}
+
+bool ConfigValues::contains(const std::string& key) const
+{
+ return values.count(key);
+}
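The level-priority behaviour implemented above can be exercised in isolation. The following sketch is illustrative only; it assumes the CONF_* level constants from common/config.h, where lower enumerators have lower priority and level -1 in get_value() selects the highest-priority entry.

#include <cassert>
#include <string>
#include <boost/variant.hpp>
#include "common/config.h"          // CONF_DEFAULT, CONF_CMDLINE, ... (assumed location)
#include "common/config_values.h"

int main()
{
  ConfigValues v;
  // the first value at any level takes effect
  assert(v.set_value("keyring", Option::value_t{std::string("/etc/ceph/keyring")},
                     CONF_DEFAULT) == ConfigValues::SET_HAVE_EFFECT);
  // a higher-priority level overrides it
  assert(v.set_value("keyring", Option::value_t{std::string("/override")},
                     CONF_CMDLINE) == ConfigValues::SET_HAVE_EFFECT);
  // changing the lower level again is recorded but masked
  assert(v.set_value("keyring", Option::value_t{std::string("/ignored")},
                     CONF_DEFAULT) == ConfigValues::SET_NO_EFFECT);
  // level -1 returns whatever currently wins
  auto [val, found] = v.get_value("keyring", -1);
  assert(found && boost::get<std::string>(val) == "/override");
  // removing the winning entry is what observers need to hear about
  assert(v.rm_val("keyring", CONF_CMDLINE) == ConfigValues::SET_HAVE_EFFECT);
  return 0;
}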
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+
+#pragma once
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "common/entity_name.h"
+#include "common/options.h"
+#include "log/SubsystemMap.h"
+#include "msg/msg_types.h"
+
+class ConfigValues {
+ using changed_set_t = std::set<std::string>;
+ using values_t = std::map<std::string, std::map<int32_t,Option::value_t>>;
+ values_t values;
+
+public:
+ EntityName name;
+ /// cluster name
+  std::string cluster;
+ ceph::logging::SubsystemMap subsys;
+ bool no_mon_config = false;
+
+// This macro block defines members of the ConfigValues class
+// corresponding to the definitions in legacy_config_opts.h.
+// These members are consumed by code that was written before
+// the new options.cc infrastructure; all newer code should
+// consume options via explicit get() rather than via these members.
+#define OPTION_OPT_INT(name) int64_t name;
+#define OPTION_OPT_LONGLONG(name) int64_t name;
+#define OPTION_OPT_STR(name) std::string name;
+#define OPTION_OPT_DOUBLE(name) double name;
+#define OPTION_OPT_FLOAT(name) double name;
+#define OPTION_OPT_BOOL(name) bool name;
+#define OPTION_OPT_ADDR(name) entity_addr_t name;
+#define OPTION_OPT_ADDRVEC(name) entity_addrvec_t name;
+#define OPTION_OPT_U32(name) uint64_t name;
+#define OPTION_OPT_U64(name) uint64_t name;
+#define OPTION_OPT_UUID(name) uuid_d name;
+#define OPTION(name, ty) \
+ public: \
+ OPTION_##ty(name)
+#define SAFE_OPTION(name, ty) \
+ protected: \
+ OPTION_##ty(name)
+#include "common/legacy_config_opts.h"
+#undef OPTION_OPT_INT
+#undef OPTION_OPT_LONGLONG
+#undef OPTION_OPT_STR
+#undef OPTION_OPT_DOUBLE
+#undef OPTION_OPT_FLOAT
+#undef OPTION_OPT_BOOL
+#undef OPTION_OPT_ADDR
+#undef OPTION_OPT_ADDRVEC
+#undef OPTION_OPT_U32
+#undef OPTION_OPT_U64
+#undef OPTION_OPT_UUID
+#undef OPTION
+#undef SAFE_OPTION
+
+public:
+ enum set_value_result_t {
+ SET_NO_CHANGE,
+ SET_NO_EFFECT,
+ SET_HAVE_EFFECT,
+ };
+ /**
+   * @return SET_NO_CHANGE if the value at this level is unchanged,
+   *         SET_NO_EFFECT if a higher-priority level still masks it,
+   *         SET_HAVE_EFFECT if the effective value changed
+ */
+ set_value_result_t set_value(const std::string& key,
+ Option::value_t&& value,
+ int level);
+ int rm_val(const std::string& key, int level);
+ void set_logging(int which, const char* val);
+ /**
+ * @param level the level of the setting, -1 for the one with the
+ * highest-priority
+ */
+ std::pair<Option::value_t, bool> get_value(const std::string& name,
+ int level) const;
+ template<typename Func> void for_each(Func&& func) const {
+ for (const auto& [name,configs] : values) {
+ func(name, configs);
+ }
+ }
+ bool contains(const std::string& key) const;
+};
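For readers unfamiliar with the X-macro pattern above: each OPTION()/SAFE_OPTION() entry in legacy_config_opts.h expands into a typed data member of ConfigValues, with the OPTION_OPT_* helper picking the member's type. A sketch of one expansion, taking mon_data (a string option used elsewhere in this patch) as the example; the exact line in legacy_config_opts.h is assumed:

// In legacy_config_opts.h (assumed form):
//   OPTION(mon_data, OPT_STR)
//
// Under the macros above this expands as
//   OPTION(mon_data, OPT_STR)
//     -> public: OPTION_OPT_STR(mon_data)
//     -> public: std::string mon_data;
//
// SAFE_OPTION(name, ty) emits the same member, but under 'protected:'
// instead of 'public:'.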
Observer obs(conf_var);
- cct->_conf->add_observer(&obs);
+ cct->_conf.add_observer(&obs);
- cct->_conf->set_val_or_die(conf_var, buf);
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.set_val_or_die(conf_var, buf);
+ cct->_conf.apply_changes(nullptr);
- cct->_conf->remove_observer(&obs);
+ cct->_conf.remove_observer(&obs);
}
void pick_addresses(CephContext *cct, int needs)
{
struct ifaddrs *ifa;
int r = getifaddrs(&ifa);
- auto public_addr = cct->_conf->get_val<entity_addr_t>("public_addr");
- auto public_network = cct->_conf->get_val<std::string>("public_network");
+ auto public_addr = cct->_conf.get_val<entity_addr_t>("public_addr");
+ auto public_network = cct->_conf.get_val<std::string>("public_network");
auto public_network_interface =
- cct->_conf->get_val<std::string>("public_network_interface");
- auto cluster_addr = cct->_conf->get_val<entity_addr_t>("cluster_addr");
- auto cluster_network = cct->_conf->get_val<std::string>("cluster_network");
+ cct->_conf.get_val<std::string>("public_network_interface");
+ auto cluster_addr = cct->_conf.get_val<entity_addr_t>("cluster_addr");
+ auto cluster_network = cct->_conf.get_val<std::string>("cluster_network");
auto cluster_network_interface =
- cct->_conf->get_val<std::string>("cluster_network_interface");
+ cct->_conf.get_val<std::string>("cluster_network_interface");
if (r < 0) {
string err = cpp_strerror(errno);
unsigned msgrv = flags & (CEPH_PICK_ADDRESS_MSGR1 |
CEPH_PICK_ADDRESS_MSGR2);
if (msgrv == 0) {
- if (cct->_conf->get_val<bool>("ms_bind_msgr1")) {
+ if (cct->_conf.get_val<bool>("ms_bind_msgr1")) {
msgrv |= CEPH_PICK_ADDRESS_MSGR1;
}
- if (cct->_conf->get_val<bool>("ms_bind_msgr2")) {
+ if (cct->_conf.get_val<bool>("ms_bind_msgr2")) {
msgrv |= CEPH_PICK_ADDRESS_MSGR2;
}
if (msgrv == 0) {
unsigned ipv = flags & (CEPH_PICK_ADDRESS_IPV4 |
CEPH_PICK_ADDRESS_IPV6);
if (ipv == 0) {
- if (cct->_conf->get_val<bool>("ms_bind_ipv4")) {
+ if (cct->_conf.get_val<bool>("ms_bind_ipv4")) {
ipv |= CEPH_PICK_ADDRESS_IPV4;
}
- if (cct->_conf->get_val<bool>("ms_bind_ipv6")) {
+ if (cct->_conf.get_val<bool>("ms_bind_ipv6")) {
ipv |= CEPH_PICK_ADDRESS_IPV6;
}
if (ipv == 0) {
return -EINVAL;
}
- if (cct->_conf->get_val<bool>("ms_bind_prefer_ipv4")) {
+ if (cct->_conf.get_val<bool>("ms_bind_prefer_ipv4")) {
flags |= CEPH_PICK_ADDRESS_PREFER_IPV4;
} else {
flags &= ~CEPH_PICK_ADDRESS_PREFER_IPV4;
string networks;
string interfaces;
if (addrt & CEPH_PICK_ADDRESS_PUBLIC) {
- addr = cct->_conf->get_val<entity_addr_t>("public_addr");
- networks = cct->_conf->get_val<std::string>("public_network");
+ addr = cct->_conf.get_val<entity_addr_t>("public_addr");
+ networks = cct->_conf.get_val<std::string>("public_network");
interfaces =
- cct->_conf->get_val<std::string>("public_network_interface");
+ cct->_conf.get_val<std::string>("public_network_interface");
} else {
- addr = cct->_conf->get_val<entity_addr_t>("cluster_addr");
- networks = cct->_conf->get_val<std::string>("cluster_network");
+ addr = cct->_conf.get_val<entity_addr_t>("cluster_addr");
+ networks = cct->_conf.get_val<std::string>("cluster_network");
interfaces =
- cct->_conf->get_val<std::string>("cluster_network_interface");
+ cct->_conf.get_val<std::string>("cluster_network_interface");
if (networks.empty()) {
// fall back to public_ network and interface if cluster is not set
- networks = cct->_conf->get_val<std::string>("public_network");
+ networks = cct->_conf.get_val<std::string>("public_network");
interfaces =
- cct->_conf->get_val<std::string>("public_network_interface");
+ cct->_conf.get_val<std::string>("public_network_interface");
}
}
if (addr.is_blank_ip() &&
lderr(cct) << "leaked refs:\n";
dump_weak_refs(*_dout);
*_dout << dendl;
- if (cct->_conf->get_val<bool>("debug_asserts_on_shutdown")) {
+ if (cct->_conf.get_val<bool>("debug_asserts_on_shutdown")) {
assert(weak_refs.empty());
}
}
*/
int CrushWrapper::get_osd_pool_default_crush_replicated_ruleset(CephContext *cct)
{
- int crush_ruleset = cct->_conf->get_val<int64_t>("osd_pool_default_crush_rule");
+ int crush_ruleset = cct->_conf.get_val<int64_t>("osd_pool_default_crush_rule");
if (crush_ruleset < 0) {
crush_ruleset = find_first_ruleset(pg_pool_t::TYPE_REPLICATED);
} else if (!ruleset_exists(crush_ruleset)) {
* Global variables for use from process context.
*/
CephContext *g_ceph_context = NULL;
-md_config_t *g_conf = NULL;
+ConfigProxy& g_conf() {
+ return g_ceph_context->_conf;
+}
const char *g_assert_file = 0;
int g_assert_line = 0;
class CephContext;
extern CephContext *g_ceph_context;
-extern md_config_t *g_conf;
+ConfigProxy& g_conf();
extern const char *g_assert_file;
extern int g_assert_line;
static void global_init_set_globals(CephContext *cct)
{
g_ceph_context = cct;
- g_conf = cct->_conf;
}
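Two things follow from replacing the global pointer with the g_conf() accessor: there is no separate global left to keep in sync (hence the assignment dropped from global_init_set_globals above), and call sites choose between the legacy member view and the typed lookup. Both styles, as they appear in hunks elsewhere in this patch:

// legacy struct-style member access through the proxy
std::string mon_data = g_conf()->mon_data;
// typed option lookup by name
uint64_t bloom_bits = g_conf().get_val<uint64_t>("rocksdb_bloom_bits_per_key");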
static void output_ceph_version()
CephContext *cct = common_preinit(iparams, code_env, flags);
cct->_conf->cluster = cluster;
global_init_set_globals(cct);
- md_config_t *conf = cct->_conf;
+ auto& conf = cct->_conf;
if (flags & (CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
CINIT_FLAG_NO_MON_CONFIG)) {
// alternate defaults
if (defaults) {
for (auto& i : *defaults) {
- conf->set_val_default(i.first, i.second);
+ conf.set_val_default(i.first, i.second);
}
}
- int ret = conf->parse_config_files(c_str_or_null(conf_file_list),
- &cerr, flags);
+ int ret = conf.parse_config_files(c_str_or_null(conf_file_list),
+ &cerr, flags);
if (ret == -EDOM) {
cct->_log->flush();
cerr << "global_init: error parsing config file." << std::endl;
}
// environment variables override (CEPH_ARGS, CEPH_KEYRING)
- conf->parse_env();
+ conf.parse_env();
// command line (as passed by caller)
- conf->parse_argv(args);
+ conf.parse_argv(args);
if (!conf->no_mon_config) {
// make sure our mini-session gets legacy values
- conf->apply_changes(nullptr);
+ conf.apply_changes(nullptr);
MonClient mc_bootstrap(g_ceph_context);
if (mc_bootstrap.get_monmap_and_config() < 0) {
}
// do the --show-config[-val], if present in argv
- conf->do_argv_commands();
+ conf.do_argv_commands();
// Now we're ready to complain about config file parse errors
- g_conf->complain_about_parse_errors(g_ceph_context);
+ g_conf().complain_about_parse_errors(g_ceph_context);
}
boost::intrusive_ptr<CephContext>
int siglist[] = { SIGPIPE, 0 };
block_signals(siglist, NULL);
- if (g_conf->fatal_signal_handlers) {
+ if (g_conf()->fatal_signal_handlers) {
install_standard_sighandlers();
}
register_assert_context(g_ceph_context);
- if (g_conf->log_flush_on_exit)
+ if (g_conf()->log_flush_on_exit)
g_ceph_context->_log->set_flush_on_exit();
// drop privileges?
// consider --setuser root a no-op, even if we're not root
if (getuid() != 0) {
- if (g_conf->setuser.length()) {
- cerr << "ignoring --setuser " << g_conf->setuser << " since I am not root"
+ if (g_conf()->setuser.length()) {
+ cerr << "ignoring --setuser " << g_conf()->setuser << " since I am not root"
<< std::endl;
}
- if (g_conf->setgroup.length()) {
- cerr << "ignoring --setgroup " << g_conf->setgroup
+ if (g_conf()->setgroup.length()) {
+ cerr << "ignoring --setgroup " << g_conf()->setgroup
<< " since I am not root" << std::endl;
}
- } else if (g_conf->setgroup.length() ||
- g_conf->setuser.length()) {
+ } else if (g_conf()->setgroup.length() ||
+ g_conf()->setuser.length()) {
uid_t uid = 0; // zero means no change; we can only drop privs here.
gid_t gid = 0;
std::string uid_string;
std::string gid_string;
- if (g_conf->setuser.length()) {
- uid = atoi(g_conf->setuser.c_str());
+ if (g_conf()->setuser.length()) {
+ uid = atoi(g_conf()->setuser.c_str());
if (!uid) {
char buf[4096];
struct passwd pa;
struct passwd *p = 0;
- getpwnam_r(g_conf->setuser.c_str(), &pa, buf, sizeof(buf), &p);
+ getpwnam_r(g_conf()->setuser.c_str(), &pa, buf, sizeof(buf), &p);
if (!p) {
- cerr << "unable to look up user '" << g_conf->setuser << "'"
+ cerr << "unable to look up user '" << g_conf()->setuser << "'"
<< std::endl;
exit(1);
}
uid = p->pw_uid;
gid = p->pw_gid;
- uid_string = g_conf->setuser;
+ uid_string = g_conf()->setuser;
}
}
- if (g_conf->setgroup.length() > 0) {
- gid = atoi(g_conf->setgroup.c_str());
+ if (g_conf()->setgroup.length() > 0) {
+ gid = atoi(g_conf()->setgroup.c_str());
if (!gid) {
char buf[4096];
struct group gr;
struct group *g = 0;
- getgrnam_r(g_conf->setgroup.c_str(), &gr, buf, sizeof(buf), &g);
+ getgrnam_r(g_conf()->setgroup.c_str(), &gr, buf, sizeof(buf), &g);
if (!g) {
- cerr << "unable to look up group '" << g_conf->setgroup << "'"
+ cerr << "unable to look up group '" << g_conf()->setgroup << "'"
<< ": " << cpp_strerror(errno) << std::endl;
exit(1);
}
gid = g->gr_gid;
- gid_string = g_conf->setgroup;
+ gid_string = g_conf()->setgroup;
}
}
if ((uid || gid) &&
- g_conf->setuser_match_path.length()) {
+ g_conf()->setuser_match_path.length()) {
// induce early expansion of setuser_match_path config option
- string match_path = g_conf->setuser_match_path;
- g_conf->early_expand_meta(match_path, &cerr);
+ string match_path = g_conf()->setuser_match_path;
+ g_conf().early_expand_meta(match_path, &cerr);
struct stat st;
int r = ::stat(match_path.c_str(), &st);
if (r < 0) {
cerr << "unable to stat setuser_match_path "
- << g_conf->setuser_match_path
+ << g_conf()->setuser_match_path
<< ": " << cpp_strerror(errno) << std::endl;
exit(1);
}
#endif
// Expand metavariables. Invoke configuration observers. Open log file.
- g_conf->apply_changes(NULL);
+ g_conf().apply_changes(NULL);
- if (g_conf->run_dir.length() &&
+ if (g_conf()->run_dir.length() &&
code_env == CODE_ENVIRONMENT_DAEMON &&
!(flags & CINIT_FLAG_NO_DAEMON_ACTIONS)) {
- int r = ::mkdir(g_conf->run_dir.c_str(), 0755);
+ int r = ::mkdir(g_conf()->run_dir.c_str(), 0755);
if (r < 0 && errno != EEXIST) {
- cerr << "warning: unable to create " << g_conf->run_dir << ": " << cpp_strerror(errno) << std::endl;
+ cerr << "warning: unable to create " << g_conf()->run_dir << ": " << cpp_strerror(errno) << std::endl;
}
}
// call all observers now. this has the side-effect of configuring
// and opening the log file immediately.
- g_conf->call_all_observers();
+ g_conf().call_all_observers();
if (priv_ss.str().length()) {
dout(0) << priv_ss.str() << dendl;
// Fix ownership on log files and run directories if needed.
// Admin socket files are chown()'d during the common init path _after_
// the service thread has been started. This is sadly a bit of a hack :(
- chown_path(g_conf->run_dir,
+ chown_path(g_conf()->run_dir,
g_ceph_context->get_set_uid(),
g_ceph_context->get_set_gid(),
g_ceph_context->get_set_uid_string(),
}
// Now we're ready to complain about config file parse errors
- g_conf->complain_about_parse_errors(g_ceph_context);
+ g_conf().complain_about_parse_errors(g_ceph_context);
// test leak checking
- if (g_conf->debug_deliberately_leak_memory) {
+ if (g_conf()->debug_deliberately_leak_memory) {
derr << "deliberately leaking some memory" << dendl;
char *s = new char[1234567];
(void)s;
if (g_code_env != CODE_ENVIRONMENT_DAEMON)
return -1;
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
if (!conf->daemonize) {
if (pidfile_write(conf) < 0)
*/
reopen_as_null(cct, STDIN_FILENO);
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
if (pidfile_write(conf) < 0)
exit(1);
void global_init_chdir(const CephContext *cct)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
if (conf->chdir.empty())
return;
if (::chdir(conf->chdir.c_str())) {
int global_init_preload_erasure_code(const CephContext *cct)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
string plugins = conf->osd_erasure_code_plugins;
// validate that this is a not a legacy plugin
stringstream ss;
int r = ErasureCodePluginRegistry::instance().preload(
plugins,
- conf->get_val<std::string>("erasure_code_dir"),
+ conf.get_val<std::string>("erasure_code_dir"),
&ss);
if (r)
derr << ss.str() << dendl;
}
int verify();
int remove();
- int open(const md_config_t *conf);
+ int open(const ConfigProxy& conf);
int write();
};
return 0;
}
-int pidfh::open(const md_config_t *conf)
+int pidfh::open(const ConfigProxy& conf)
{
int len = snprintf(pf_path, sizeof(pf_path),
"%s", conf->pid_file.c_str());
pfh = nullptr;
}
-int pidfile_write(const md_config_t *conf)
+int pidfile_write(const ConfigProxy& conf)
{
if (conf->pid_file.empty()) {
dout(0) << __func__ << ": ignore empty --pid-file" << dendl;
// Write a pidfile with the current pid, using the configuration in the
// provided conf structure.
-int pidfile_write(const md_config_t *conf);
+int pidfile_write(const ConfigProxy& conf);
// Remove the pid file that was previously written by pidfile_write.
// This is safe to call in a signal handler context.
jf.open_object_section("crash");
jf.dump_string("crash_id", id);
now.gmtime(jf.dump_stream("timestamp"));
- jf.dump_string("entity_name", g_ceph_context->_conf->name.to_str());
+ jf.dump_string("entity_name", g_ceph_context->_conf.name().to_str());
jf.dump_string("ceph_version", ceph_version_to_str());
struct utsname u;
KeyRing keyring;
auto auth_client_required =
- cct->_conf->get_val<std::string>("auth_client_required");
+ cct->_conf.get_val<std::string>("auth_client_required");
if (auth_client_required != "none") {
r = keyring.from_ceph_context(cct);
- auto keyfile = cct->_conf->get_val<std::string>("keyfile");
- auto key = cct->_conf->get_val<std::string>("key");
+ auto keyfile = cct->_conf.get_val<std::string>("keyfile");
+ auto key = cct->_conf.get_val<std::string>("key");
if (r == -ENOENT && keyfile.empty() && key.empty())
r = 0;
if (r < 0) {
{
// init defaults. caller can override these if they want
// prior to calling open.
- options.write_buffer_size = g_conf->leveldb_write_buffer_size;
- options.cache_size = g_conf->leveldb_cache_size;
- options.block_size = g_conf->leveldb_block_size;
- options.bloom_size = g_conf->leveldb_bloom_size;
- options.compression_enabled = g_conf->leveldb_compression;
- options.paranoid_checks = g_conf->leveldb_paranoid;
- options.max_open_files = g_conf->leveldb_max_open_files;
- options.log_file = g_conf->leveldb_log;
+ options.write_buffer_size = g_conf()->leveldb_write_buffer_size;
+ options.cache_size = g_conf()->leveldb_cache_size;
+ options.block_size = g_conf()->leveldb_block_size;
+ options.bloom_size = g_conf()->leveldb_bloom_size;
+ options.compression_enabled = g_conf()->leveldb_compression;
+ options.paranoid_checks = g_conf()->leveldb_paranoid;
+ options.max_open_files = g_conf()->leveldb_max_open_files;
+ options.log_file = g_conf()->leveldb_log;
return 0;
}
ldoptions.paranoid_checks = options.paranoid_checks;
ldoptions.create_if_missing = create_if_missing;
- if (g_conf->leveldb_log_to_ceph_log) {
+ if (g_conf()->leveldb_log_to_ceph_log) {
ceph_logger = new CephLevelDBLogger(g_ceph_context);
ldoptions.info_log = ceph_logger;
}
logger = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
- if (g_conf->leveldb_compact_on_mount) {
+ if (g_conf()->leveldb_compact_on_mount) {
derr << "Compacting leveldb store..." << dendl;
compact();
derr << "Finished compacting leveldb store" << dendl;
}
}
- if (g_conf->rocksdb_perf) {
+ if (g_conf()->rocksdb_perf) {
dbstats = rocksdb::CreateDBStatistics();
opt.statistics = dbstats;
}
return -e.code().value();
}
- if (g_conf->rocksdb_log_to_ceph_log) {
+ if (g_conf()->rocksdb_log_to_ceph_log) {
opt.info_log.reset(new CephRocksdbLogger(g_ceph_context));
}
// caches
if (!set_cache_flag) {
- cache_size = g_conf->rocksdb_cache_size;
+ cache_size = g_conf()->rocksdb_cache_size;
}
- uint64_t row_cache_size = cache_size * g_conf->rocksdb_cache_row_ratio;
+ uint64_t row_cache_size = cache_size * g_conf()->rocksdb_cache_row_ratio;
uint64_t block_cache_size = cache_size - row_cache_size;
- if (g_conf->rocksdb_cache_type == "lru") {
+ if (g_conf()->rocksdb_cache_type == "lru") {
bbt_opts.block_cache = rocksdb::NewLRUCache(
block_cache_size,
- g_conf->rocksdb_cache_shard_bits);
- } else if (g_conf->rocksdb_cache_type == "clock") {
+ g_conf()->rocksdb_cache_shard_bits);
+ } else if (g_conf()->rocksdb_cache_type == "clock") {
bbt_opts.block_cache = rocksdb::NewClockCache(
block_cache_size,
- g_conf->rocksdb_cache_shard_bits);
+ g_conf()->rocksdb_cache_shard_bits);
} else {
- derr << "unrecognized rocksdb_cache_type '" << g_conf->rocksdb_cache_type
+ derr << "unrecognized rocksdb_cache_type '" << g_conf()->rocksdb_cache_type
<< "'" << dendl;
return -EINVAL;
}
- bbt_opts.block_size = g_conf->rocksdb_block_size;
+ bbt_opts.block_size = g_conf()->rocksdb_block_size;
if (row_cache_size > 0)
opt.row_cache = rocksdb::NewLRUCache(row_cache_size,
- g_conf->rocksdb_cache_shard_bits);
- uint64_t bloom_bits = g_conf->get_val<uint64_t>("rocksdb_bloom_bits_per_key");
+ g_conf()->rocksdb_cache_shard_bits);
+ uint64_t bloom_bits = g_conf().get_val<uint64_t>("rocksdb_bloom_bits_per_key");
if (bloom_bits > 0) {
dout(10) << __func__ << " set bloom filter bits per key to "
<< bloom_bits << dendl;
bbt_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bloom_bits));
}
using std::placeholders::_1;
- if (g_conf->with_val<std::string>("rocksdb_index_type",
+ if (g_conf().with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"binary_search")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch;
- if (g_conf->with_val<std::string>("rocksdb_index_type",
+ if (g_conf().with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"hash_search")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
- if (g_conf->with_val<std::string>("rocksdb_index_type",
+ if (g_conf().with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"two_level")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
if (!bbt_opts.no_block_cache) {
bbt_opts.cache_index_and_filter_blocks =
- g_conf->get_val<bool>("rocksdb_cache_index_and_filter_blocks");
+ g_conf().get_val<bool>("rocksdb_cache_index_and_filter_blocks");
bbt_opts.cache_index_and_filter_blocks_with_high_priority =
- g_conf->get_val<bool>("rocksdb_cache_index_and_filter_blocks_with_high_priority");
+ g_conf().get_val<bool>("rocksdb_cache_index_and_filter_blocks_with_high_priority");
bbt_opts.pin_l0_filter_and_index_blocks_in_cache =
- g_conf->get_val<bool>("rocksdb_pin_l0_filter_and_index_blocks_in_cache");
+ g_conf().get_val<bool>("rocksdb_pin_l0_filter_and_index_blocks_in_cache");
}
- bbt_opts.partition_filters = g_conf->get_val<bool>("rocksdb_partition_filters");
- if (g_conf->get_val<uint64_t>("rocksdb_metadata_block_size") > 0)
- bbt_opts.metadata_block_size = g_conf->get_val<uint64_t>("rocksdb_metadata_block_size");
+ bbt_opts.partition_filters = g_conf().get_val<bool>("rocksdb_partition_filters");
+ if (g_conf().get_val<uint64_t>("rocksdb_metadata_block_size") > 0)
+ bbt_opts.metadata_block_size = g_conf().get_val<uint64_t>("rocksdb_metadata_block_size");
opt.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbt_opts));
- dout(10) << __func__ << " block size " << g_conf->rocksdb_block_size
+ dout(10) << __func__ << " block size " << g_conf()->rocksdb_block_size
<< ", block_cache size " << byte_u_t(block_cache_size)
<< ", row_cache size " << byte_u_t(row_cache_size)
<< "; shards "
- << (1 << g_conf->rocksdb_cache_shard_bits)
- << ", type " << g_conf->rocksdb_cache_type
+ << (1 << g_conf()->rocksdb_cache_shard_bits)
+ << ", type " << g_conf()->rocksdb_cache_type
<< dendl;
opt.merge_operator.reset(new MergeOperatorRouter(*this));
void RocksDBStore::get_statistics(Formatter *f)
{
- if (!g_conf->rocksdb_perf) {
+ if (!g_conf()->rocksdb_perf) {
dout(20) << __func__ << " RocksDB perf is disabled, can't probe for stats"
<< dendl;
return;
}
- if (g_conf->rocksdb_collect_compaction_stats) {
+ if (g_conf()->rocksdb_collect_compaction_stats) {
std::string stat_str;
bool status = db->GetProperty("rocksdb.stats", &stat_str);
if (status) {
f->close_section();
}
}
- if (g_conf->rocksdb_collect_extended_stats) {
+ if (g_conf()->rocksdb_collect_extended_stats) {
if (dbstats) {
f->open_object_section("rocksdb_extended_statistics");
string stat_str = dbstats->ToString();
logger->dump_formatted(f,0);
f->close_section();
}
- if (g_conf->rocksdb_collect_memory_stats) {
+ if (g_conf()->rocksdb_collect_memory_stats) {
f->open_object_section("rocksdb_memtable_statistics");
std::string str;
if (!bbt_opts.no_block_cache) {
{
// enable rocksdb breakdown
// considering performance overhead, default is disabled
- if (g_conf->rocksdb_perf) {
+ if (g_conf()->rocksdb_perf) {
rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
rocksdb::get_perf_context()->Reset();
}
<< " Rocksdb transaction: " << rocks_txc.seen << dendl;
}
- if (g_conf->rocksdb_perf) {
+ if (g_conf()->rocksdb_perf) {
utime_t write_memtable_time;
utime_t write_delay_time;
utime_t write_wal_time;
int RocksDBStore::set_cache_high_pri_pool_ratio(double ratio)
{
- if (g_conf->rocksdb_cache_type != "lru") {
+ if (g_conf()->rocksdb_cache_type != "lru") {
return -EOPNOTSUPP;
}
dout(10) << __func__ << " old ratio: "
int conf_read_file(const char *path_list)
{
- int ret = cct->_conf->parse_config_files(path_list, nullptr, 0);
+ int ret = cct->_conf.parse_config_files(path_list, nullptr, 0);
if (ret)
return ret;
- cct->_conf->apply_changes(nullptr);
- cct->_conf->complain_about_parse_errors(cct);
+ cct->_conf.apply_changes(nullptr);
+ cct->_conf.complain_about_parse_errors(cct);
return 0;
}
int ret;
vector<const char*> args;
argv_to_vec(argc, argv, args);
- ret = cct->_conf->parse_argv(args);
+ ret = cct->_conf.parse_argv(args);
if (ret)
return ret;
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_parse_env(const char *name)
{
- md_config_t *conf = cct->_conf;
- conf->parse_env(name);
- conf->apply_changes(nullptr);
+ auto& conf = cct->_conf;
+ conf.parse_env(name);
+ conf.apply_changes(nullptr);
return 0;
}
int conf_set(const char *option, const char *value)
{
- int ret = cct->_conf->set_val(option, value);
+ int ret = cct->_conf.set_val(option, value);
if (ret)
return ret;
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_get(const char *option, char *buf, size_t len)
{
char *tmp = buf;
- return cct->_conf->get_val(option, &tmp, len);
+ return cct->_conf.get_val(option, &tmp, len);
}
Client *get_client()
}
CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
- cct->_conf->parse_env(); // environment variables coverride
- cct->_conf->apply_changes(nullptr);
+  cct->_conf.parse_env(); // environment variables override
+ cct->_conf.apply_changes(nullptr);
int ret = ceph_create_with_context(cmount, cct);
cct->put();
cct = nullptr;
public:
using Dispatcher::cct;
- md_config_t *conf;
+ const ConfigProxy& conf;
private:
enum {
DISCONNECTED,
int librados::Rados::conf_get(const char *option, std::string &val)
{
char *str = NULL;
- md_config_t *conf = client->cct->_conf;
- int ret = conf->get_val(option, &str, -1);
+ const auto& conf = client->cct->_conf;
+ int ret = conf.get_val(option, &str, -1);
if (ret) {
free(str);
return ret;
CephContext *cct = common_preinit(*iparams, CODE_ENVIRONMENT_LIBRARY, 0);
if (clustername)
cct->_conf->cluster = clustername;
- cct->_conf->parse_env(); // environment variables override
- cct->_conf->apply_changes(NULL);
+ cct->_conf.parse_env(); // environment variables override
+ cct->_conf.apply_changes(nullptr);
TracepointProvider::initialize<tracepoint_traits>(cct);
return cct;
{
tracepoint(librados, rados_conf_read_file_enter, cluster, path_list);
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
+ auto& conf = client->cct->_conf;
ostringstream warnings;
- int ret = conf->parse_config_files(path_list, &warnings, 0);
+ int ret = conf.parse_config_files(path_list, &warnings, 0);
if (ret) {
if (warnings.tellp() > 0)
lderr(client->cct) << warnings.str() << dendl;
- client->cct->_conf->complain_about_parse_errors(client->cct);
+ client->cct->_conf.complain_about_parse_errors(client->cct);
tracepoint(librados, rados_conf_read_file_exit, ret);
return ret;
}
- conf->parse_env(); // environment variables override
+ conf.parse_env(); // environment variables override
- conf->apply_changes(NULL);
- client->cct->_conf->complain_about_parse_errors(client->cct);
+ conf.apply_changes(nullptr);
+ client->cct->_conf.complain_about_parse_errors(client->cct);
tracepoint(librados, rados_conf_read_file_exit, 0);
return 0;
}
tracepoint(librados, rados_conf_parse_argv_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
+ auto& conf = client->cct->_conf;
vector<const char*> args;
argv_to_vec(argc, argv, args);
- int ret = conf->parse_argv(args);
+ int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_exit, ret);
return ret;
}
- conf->apply_changes(NULL);
+ conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_argv_exit, 0);
return 0;
}
tracepoint(librados, rados_conf_parse_argv_remainder_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
+ auto& conf = client->cct->_conf;
vector<const char*> args;
for (int i=0; i<argc; i++)
args.push_back(argv[i]);
- int ret = conf->parse_argv(args);
+ int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_remainder_exit, ret);
return ret;
}
- conf->apply_changes(NULL);
+    conf.apply_changes(nullptr);
assert(args.size() <= (unsigned int)argc);
for (i = 0; i < (unsigned int)argc; ++i) {
if (i < args.size())
{
tracepoint(librados, rados_conf_parse_env_enter, cluster, env);
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
- conf->parse_env(env);
- conf->apply_changes(NULL);
+ auto& conf = client->cct->_conf;
+ conf.parse_env(env);
+ conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_env_exit, 0);
return 0;
}
{
tracepoint(librados, rados_conf_set_enter, cluster, option, value);
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
- int ret = conf->set_val(option, value);
+ auto& conf = client->cct->_conf;
+ int ret = conf.set_val(option, value);
if (ret) {
tracepoint(librados, rados_conf_set_exit, ret);
return ret;
}
- conf->apply_changes(NULL);
+ conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_set_exit, 0);
return 0;
}
tracepoint(librados, rados_conf_get_enter, cluster, option, len);
char *tmp = buf;
librados::RadosClient *client = (librados::RadosClient *)cluster;
- md_config_t *conf = client->cct->_conf;
- int retval = conf->get_val(option, &tmp, len);
+ const auto& conf = client->cct->_conf;
+ int retval = conf.get_val(option, &tmp, len);
tracepoint(librados, rados_conf_get_exit, retval, retval ? "" : option);
return retval;
}
: ThreadPool(cct, "librbd::thread_pool", "tp_librbd", 1,
"rbd_op_threads"),
op_work_queue(new ContextWQ("librbd::op_work_queue",
- cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
+ cct->_conf.get_val<int64_t>("rbd_op_thread_timeout"),
this)) {
start();
}
get_thread_pool_instance(cct, &thread_pool, &op_work_queue);
io_work_queue = new io::ImageRequestWQ<>(
this, "librbd::io_work_queue",
- cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
+ cct->_conf.get_val<int64_t>("rbd_op_thread_timeout"),
thread_pool);
io_object_dispatcher = new io::ObjectDispatcher<>(this);
- if (cct->_conf->get_val<bool>("rbd_auto_exclusive_lock_until_manual_request")) {
+ if (cct->_conf.get_val<bool>("rbd_auto_exclusive_lock_until_manual_request")) {
exclusive_lock_policy = new exclusive_lock::AutomaticPolicy(this);
} else {
exclusive_lock_policy = new exclusive_lock::StandardPolicy(this);
if (configs[key]) \
config = local_config_t.get_val<type>("rbd_"#config); \
else \
- config = cct->_conf->get_val<type>("rbd_"#config); \
+ config = cct->_conf.get_val<type>("rbd_"#config); \
} while (0);
ASSIGN_OPTION(non_blocking_aio, bool);
ThreadPoolSingleton>("librbd::ImageUpdateWatchers::thread_pool",
false, m_cct);
m_work_queue = new ContextWQ("librbd::ImageUpdateWatchers::op_work_queue",
- m_cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
+ m_cct->_conf.get_val<int64_t>("rbd_op_thread_timeout"),
&thread_pool);
}
schedule_request_lock(true);
} else {
// lock owner acked -- but resend if we don't see them release the lock
- int retry_timeout = m_image_ctx.cct->_conf->template get_val<int64_t>(
+ int retry_timeout = m_image_ctx.cct->_conf.template get_val<int64_t>(
"client_notify_timeout");
ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
<< " seconds" << dendl;
&cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
"librbd::journal::thread_pool", false, cct);
m_work_queue = new ContextWQ("librbd::journal::work_queue",
- cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
+ cct->_conf.get_val<int64_t>("rbd_op_thread_timeout"),
thread_pool_singleton);
ImageCtx::get_timer_instance(cct, &m_timer, &m_timer_lock);
}
uint64_t get_rbd_default_features(CephContext* cct)
{
- auto value = cct->_conf->get_val<std::string>("rbd_default_features");
+ auto value = cct->_conf.get_val<std::string>("rbd_default_features");
return librbd::rbd_features_from_string(value, nullptr);
}
{
Mutex::Locker locker(m_lock);
for (int i = 0;
- i < m_cct->_conf->get_val<int64_t>("rbd_concurrent_management_ops");
+ i < m_cct->_conf.get_val<int64_t>("rbd_concurrent_management_ops");
++i) {
send_next_object_copy();
if (m_ret_val < 0 && m_current_ops == 0) {
m_use_p_features = false;
}
- std::string default_clone_format = m_cct->_conf->get_val<std::string>(
+ std::string default_clone_format = m_cct->_conf.get_val<std::string>(
"rbd_default_clone_format");
if (default_clone_format == "1") {
m_clone_format = 1;
if (image_options.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &m_stripe_unit) != 0 ||
m_stripe_unit == 0) {
- m_stripe_unit = m_cct->_conf->get_val<uint64_t>("rbd_default_stripe_unit");
+ m_stripe_unit = m_cct->_conf.get_val<uint64_t>("rbd_default_stripe_unit");
}
if (image_options.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &m_stripe_count) != 0 ||
m_stripe_count == 0) {
- m_stripe_count = m_cct->_conf->get_val<uint64_t>("rbd_default_stripe_count");
+ m_stripe_count = m_cct->_conf.get_val<uint64_t>("rbd_default_stripe_count");
}
if (get_image_option(image_options, RBD_IMAGE_OPTION_ORDER, &m_order) != 0 ||
m_order == 0) {
- m_order = m_cct->_conf->get_val<int64_t>("rbd_default_order");
+ m_order = m_cct->_conf.get_val<int64_t>("rbd_default_order");
}
if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_ORDER,
&m_journal_order) != 0) {
- m_journal_order = m_cct->_conf->get_val<uint64_t>("rbd_journal_order");
+ m_journal_order = m_cct->_conf.get_val<uint64_t>("rbd_journal_order");
}
if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH,
&m_journal_splay_width) != 0) {
- m_journal_splay_width = m_cct->_conf->get_val<uint64_t>("rbd_journal_splay_width");
+ m_journal_splay_width = m_cct->_conf.get_val<uint64_t>("rbd_journal_splay_width");
}
if (image_options.get(RBD_IMAGE_OPTION_JOURNAL_POOL, &m_journal_pool) != 0) {
- m_journal_pool = m_cct->_conf->get_val<std::string>("rbd_journal_pool");
+ m_journal_pool = m_cct->_conf.get_val<std::string>("rbd_journal_pool");
}
if (image_options.get(RBD_IMAGE_OPTION_DATA_POOL, &m_data_pool) != 0) {
- m_data_pool = m_cct->_conf->get_val<std::string>("rbd_default_data_pool");
+ m_data_pool = m_cct->_conf.get_val<std::string>("rbd_default_data_pool");
}
m_layout.object_size = 1ull << m_order;
template<typename I>
void CreateRequest<I>::validate_pool() {
- if (!m_cct->_conf->get_val<bool>("rbd_validate_pool")) {
+ if (!m_cct->_conf.get_val<bool>("rbd_validate_pool")) {
add_image_to_directory();
return;
}
namespace {
int validate_pool(IoCtx &io_ctx, CephContext *cct) {
- if (!cct->_conf->get_val<bool>("rbd_validate_pool")) {
+ if (!cct->_conf.get_val<bool>("rbd_validate_pool")) {
return 0;
}
uint64_t format;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0)
- format = cct->_conf->get_val<int64_t>("rbd_default_format");
+ format = cct->_conf.get_val<int64_t>("rbd_default_format");
bool old_format = format == 1;
// make sure it doesn't already exist, in either format
uint64_t order = 0;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0 || order == 0) {
- order = cct->_conf->get_val<int64_t>("rbd_default_order");
+ order = cct->_conf.get_val<int64_t>("rbd_default_order");
}
r = image::CreateRequest<>::validate_order(cct, order);
if (r < 0) {
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf->get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
int r = ictx->io_work_queue->discard(ofs, len, false);
tracepoint(librbd, writesame_exit, r);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf->get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
ictx->io_work_queue->aio_discard(get_aio_completion(c), off, len, false);
tracepoint(librbd, aio_writesame_exit, 0);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf->get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
int r = ictx->io_work_queue->discard(ofs, len, false);
tracepoint(librbd, writesame_exit, r);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf->get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
ictx->io_work_queue->aio_discard(get_aio_completion(comp), off, len, false);
tracepoint(librbd, aio_writesame_exit, 0);
delete (Log **)p;// Delete allocated pointer (not Log object, the pointer only!)
}
-Log::Log(SubsystemMap *s)
+Log::Log(const SubsystemMap *s)
: m_indirect_this(NULL),
m_subs(s),
m_queue_mutex_holder(0),
Log **m_indirect_this;
log_clock clock;
- SubsystemMap *m_subs;
+ const SubsystemMap *m_subs;
pthread_mutex_t m_queue_mutex;
pthread_mutex_t m_flush_mutex;
void _log_message(const char *s, bool crash);
public:
- Log(SubsystemMap *s);
+ Log(const SubsystemMap *s);
~Log() override;
void set_flush_on_exit();
}
template <unsigned SubV, int LvlV>
- bool should_gather() {
+ bool should_gather() const {
static_assert(SubV < get_num(), "wrong subsystem ID");
static_assert(LvlV >= -1 && LvlV <= 200);
LvlV <= ceph_subsys_get_max_default_level(SubV));
}
}
- bool should_gather(const unsigned sub, int level) {
+ bool should_gather(const unsigned sub, int level) const {
assert(sub < m_subsys.size());
return level <= static_cast<int>(m_gather_levels[sub]);
}
assert(mdsmap != NULL);
_notify_mdsmap(mdsmap);
- standby_for_rank = mds_rank_t(g_conf->mds_standby_for_rank);
- standby_for_name = g_conf->mds_standby_for_name;
- standby_for_fscid = fs_cluster_id_t(g_conf->mds_standby_for_fscid);
- standby_replay = g_conf->mds_standby_replay;
+ standby_for_rank = mds_rank_t(g_conf()->mds_standby_for_rank);
+ standby_for_name = g_conf()->mds_standby_for_name;
+ standby_for_fscid = fs_cluster_id_t(g_conf()->mds_standby_for_fscid);
+ standby_replay = g_conf()->mds_standby_replay;
// Spawn threads and start messaging
timer.init();
dout(10) << "handle_mds_beacon " << ceph_mds_state_name(m->get_state())
<< " seq " << m->get_seq() << " rtt " << rtt << dendl;
- if (was_laggy && rtt < g_conf->mds_beacon_grace) {
+ if (was_laggy && rtt < g_conf()->mds_beacon_grace) {
dout(0) << "handle_mds_beacon no longer laggy" << dendl;
was_laggy = false;
laggy_until = now;
// later beacons will clear it.
dout(1) << "handle_mds_beacon system clock goes backwards, "
<< "mark myself laggy" << dendl;
- last_acked_stamp = now - utime_t(g_conf->mds_beacon_grace + 1, 0);
+ last_acked_stamp = now - utime_t(g_conf()->mds_beacon_grace + 1, 0);
was_laggy = true;
}
timer.cancel_event(sender);
}
sender = timer.add_event_after(
- g_conf->mds_beacon_interval,
+ g_conf()->mds_beacon_interval,
new FunctionContext([this](int) {
assert(lock.is_locked_by_me());
sender = nullptr;
utime_t now = ceph_clock_now();
utime_t since = now - last_acked_stamp;
- if (since > g_conf->mds_beacon_grace) {
- dout(5) << "is_laggy " << since << " > " << g_conf->mds_beacon_grace
+ if (since > g_conf()->mds_beacon_grace) {
+ dout(5) << "is_laggy " << since << " > " << g_conf()->mds_beacon_grace
<< " since last acked beacon" << dendl;
was_laggy = true;
- if (since > (g_conf->mds_beacon_grace*2) &&
- now > last_mon_reconnect + g_conf->mds_beacon_interval) {
+ if (since > (g_conf()->mds_beacon_grace*2) &&
+ now > last_mon_reconnect + g_conf()->mds_beacon_interval) {
// maybe it's not us?
dout(5) << "initiating monitor reconnect; maybe we're not the slow one"
<< dendl;
// Detect MDS_HEALTH_TRIM condition
// Arbitrary factor of 2, indicates MDS is not trimming promptly
{
- if (mds->mdlog->get_num_segments() > (size_t)(g_conf->mds_log_max_segments * 2)) {
+ if (mds->mdlog->get_num_segments() > (size_t)(g_conf()->mds_log_max_segments * 2)) {
std::ostringstream oss;
oss << "Behind on trimming (" << mds->mdlog->get_num_segments()
- << "/" << g_conf->mds_log_max_segments << ")";
+ << "/" << g_conf()->mds_log_max_segments << ")";
MDSHealthMetric m(MDS_HEALTH_TRIM, HEALTH_WARN, oss.str());
m.metadata["num_segments"] = stringify(mds->mdlog->get_num_segments());
- m.metadata["max_segments"] = stringify(g_conf->mds_log_max_segments);
+ m.metadata["max_segments"] = stringify(g_conf()->mds_log_max_segments);
health.metrics.push_back(m);
}
}
late_cap_metrics.push_back(m);
}
- if (late_cap_metrics.size() <= (size_t)g_conf->mds_health_summarize_threshold) {
+ if (late_cap_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
health.metrics.splice(health.metrics.end(), late_cap_metrics);
} else {
std::ostringstream oss;
mds->sessionmap.get_client_session_set(sessions);
utime_t cutoff = ceph_clock_now();
- cutoff -= g_conf->mds_recall_state_timeout;
+ cutoff -= g_conf()->mds_recall_state_timeout;
utime_t last_recall = mds->mdcache->last_recall_state;
std::list<MDSHealthMetric> late_recall_metrics;
}
}
if ((session->get_num_trim_requests_warnings() > 0 &&
- session->get_num_completed_requests() >= g_conf->mds_max_completed_requests) ||
+ session->get_num_completed_requests() >= g_conf()->mds_max_completed_requests) ||
(session->get_num_trim_flushes_warnings() > 0 &&
- session->get_num_completed_flushes() >= g_conf->mds_max_completed_flushes)) {
+ session->get_num_completed_flushes() >= g_conf()->mds_max_completed_flushes)) {
std::ostringstream oss;
oss << "Client " << session->get_human_name() << " failing to advance its oldest client/flush tid";
MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID, HEALTH_WARN, oss.str());
}
}
- if (late_recall_metrics.size() <= (size_t)g_conf->mds_health_summarize_threshold) {
+ if (late_recall_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
health.metrics.splice(health.metrics.end(), late_recall_metrics);
} else {
std::ostringstream oss;
late_recall_metrics.clear();
}
- if (large_completed_requests_metrics.size() <= (size_t)g_conf->mds_health_summarize_threshold) {
+ if (large_completed_requests_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
health.metrics.splice(health.metrics.end(), large_completed_requests_metrics);
} else {
std::ostringstream oss;
dout(20) << slow << " slow request found" << dendl;
if (slow) {
std::ostringstream oss;
- oss << slow << " slow requests are blocked > " << g_conf->mds_op_complaint_time << " sec";
+ oss << slow << " slow requests are blocked > " << g_conf()->mds_op_complaint_time << " sec";
MDSHealthMetric m(MDS_HEALTH_SLOW_REQUEST, HEALTH_WARN, oss.str());
health.metrics.push_back(m);
out << " " << dir.fnode.fragstat;
if (!(dir.fnode.fragstat == dir.fnode.accounted_fragstat))
out << "/" << dir.fnode.accounted_fragstat;
- if (g_conf->mds_debug_scatterstat && dir.is_projected()) {
+ if (g_conf()->mds_debug_scatterstat && dir.is_projected()) {
const fnode_t *pf = dir.get_projected_fnode();
out << "->" << pf->fragstat;
if (!(pf->fragstat == pf->accounted_fragstat))
out << " " << dir.fnode.rstat;
if (!(dir.fnode.rstat == dir.fnode.accounted_rstat))
out << "/" << dir.fnode.accounted_rstat;
- if (g_conf->mds_debug_scatterstat && dir.is_projected()) {
+ if (g_conf()->mds_debug_scatterstat && dir.is_projected()) {
const fnode_t *pf = dir.get_projected_fnode();
out << "->" << pf->rstat;
if (!(pf->rstat == pf->accounted_rstat))
*/
bool CDir::check_rstats(bool scrub)
{
- if (!g_conf->mds_debug_scatterstat && !scrub)
+ if (!g_conf()->mds_debug_scatterstat && !scrub)
return true;
dout(25) << "check_rstats on " << this << dendl;
rd.omap_get_header(&fin->hdrbl, &fin->ret1);
if (keys.empty()) {
assert(!c);
- rd.omap_get_vals("", "", g_conf->mds_dir_keys_per_op,
+ rd.omap_get_vals("", "", g_conf()->mds_dir_keys_per_op,
&fin->omap, &fin->more, &fin->ret2);
} else {
assert(c);
rd.omap_get_vals_by_keys(str_keys, &fin->omap, &fin->ret2);
}
// check the correctness of backtrace
- if (g_conf->mds_verify_backtrace > 0 && frag == frag_t()) {
+ if (g_conf()->mds_verify_backtrace > 0 && frag == frag_t()) {
rd.getxattr("parent", &fin->btbl, &fin->ret3);
rd.set_last_op_flags(CEPH_OSD_OP_FLAG_FAILOK);
} else {
ObjectOperation rd;
rd.omap_get_vals(fin->omap.rbegin()->first,
"", /* filter prefix */
- g_conf->mds_dir_keys_per_op,
+ g_conf()->mds_dir_keys_per_op,
&fin->omap_more,
&fin->more,
&fin->ret);
//in->hack_accessed = false;
//in->hack_load_stamp = ceph_clock_now();
//num_new_inodes_loaded++;
- } else if (g_conf->get_val<bool>("mds_hack_allow_loading_invalid_metadata")) {
+ } else if (g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata")) {
dout(20) << "hack: adding duplicate dentry for " << *in << dendl;
dn = add_primary_dentry(dname, in, first, last);
} else {
bool CDir::should_split_fast() const
{
// Max size a fragment can be before trigger fast splitting
- int fast_limit = g_conf->mds_bal_split_size * g_conf->mds_bal_fragment_fast_factor;
+ int fast_limit = g_conf()->mds_bal_split_size * g_conf()->mds_bal_fragment_fast_factor;
// Fast path: the sum of accounted size and null dentries does not
// exceed threshold: we definitely are not over it.
void merge(std::list<CDir*>& subs, std::list<MDSInternalContextBase*>& waiters, bool replay);
bool should_split() const {
- return (int)get_frag_size() > g_conf->mds_bal_split_size;
+ return (int)get_frag_size() > g_conf()->mds_bal_split_size;
}
bool should_split_fast() const;
bool should_merge() const {
- return (int)get_frag_size() < g_conf->mds_bal_merge_size;
+ return (int)get_frag_size() < g_conf()->mds_bal_merge_size;
}
private:
if (in.inode.is_dir()) {
out << " " << in.inode.dirstat;
- if (g_conf->mds_debug_scatterstat && in.is_projected()) {
+ if (g_conf()->mds_debug_scatterstat && in.is_projected()) {
const CInode::mempool_inode *pi = in.get_projected_inode();
out << "->" << pi->dirstat;
}
out << " " << in.inode.rstat;
if (!(in.inode.rstat == in.inode.accounted_rstat))
out << "/" << in.inode.accounted_rstat;
- if (g_conf->mds_debug_scatterstat && in.is_projected()) {
+ if (g_conf()->mds_debug_scatterstat && in.is_projected()) {
const CInode::mempool_inode *pi = in.get_projected_inode();
out << "->" << pi->rstat;
if (!(pi->rstat == pi->accounted_rstat))
if (err) {
MDSRank *mds = mdcache->mds;
mds->clog->error() << "bad backtrace on directory inode " << ino();
- assert(!"bad backtrace" == (g_conf->mds_verify_backtrace > 1));
+ assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1));
mark_dirty_parent(mds->mdlog->get_current_segment(), false);
mds->mdlog->flush();
p.second->state_clear(CDir::STATE_DIRTYDFT);
}
}
- if (g_conf->mds_debug_frag)
+ if (g_conf()->mds_debug_frag)
verify_dirfrags();
}
break;
pf->fragstat.nsubdirs < 0) {
clog->error() << "bad/negative dir size on "
<< dir->dirfrag() << " " << pf->fragstat;
- assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);
+ assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
if (pf->fragstat.nfiles < 0)
pf->fragstat.nfiles = 0;
} else {
clog->error() << "unmatched fragstat on " << ino() << ", inode has "
<< pi->dirstat << ", dirfrags have " << dirstat;
- assert(!"unmatched fragstat" == g_conf->mds_verify_scatter);
+ assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
}
// trust the dirfrags for now
version_t v = pi->dirstat.version;
make_path_string(path);
clog->error() << "Inconsistent statistics detected: fragstat on inode "
<< ino() << " (" << path << "), inode has " << pi->dirstat;
- assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);
+ assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
if (pi->dirstat.nfiles < 0)
pi->dirstat.nfiles = 0;
clog->error() << "inconsistent rstat on inode " << ino()
<< ", inode has " << pi->rstat
<< ", directory fragments have " << rstat;
- assert(!"unmatched rstat" == g_conf->mds_verify_scatter);
+ assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
}
// trust the dirfrag for now
version_t v = pi->rstat.version;
<< auth_pins << "+" << nested_auth_pins << dendl;
assert(nested_auth_pins >= 0);
- if (g_conf->mds_debug_auth_pins) {
+ if (g_conf()->mds_debug_auth_pins) {
// audit
int s = 0;
for (const auto &p : dirfrags) {
old.inode.trim_client_ranges(follows);
- if (g_conf->mds_snap_rstat &&
+ if (g_conf()->mds_snap_rstat &&
!(old.inode.rstat == old.inode.accounted_rstat))
dirty_old_rstats.insert(follows);
void CInode::maybe_export_pin(bool update)
{
- if (!g_conf->mds_bal_export_pin)
+ if (!g_conf()->mds_bal_export_pin)
return;
if (!is_dir() || !is_normal())
return;
public:
void set_primary_parent(CDentry *p) {
assert(parent == 0 ||
- g_conf->get_val<bool>("mds_hack_allow_loading_invalid_metadata"));
+ g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata"));
parent = p;
}
void remove_primary_parent(CDentry *dn) {
bool DamageTable::oversized() const
{
- return by_id.size() > (size_t)(g_conf->mds_damage_table_max_entries);
+ return by_id.size() > (size_t)(g_conf()->mds_damage_table_max_entries);
}
bool DamageTable::is_dentry_damaged(
for (const auto &rank : fs->mds_map.failed) {
const mds_gid_t replacement = find_replacement_for(
- {fs->fscid, rank}, {}, g_conf->mon_force_standby_active);
+ {fs->fscid, rank}, {}, g_conf()->mon_force_standby_active);
if (replacement == MDS_GID_NONE) {
stuck_failed.insert(rank);
}
uint64_t Locker::calc_new_max_size(CInode::mempool_inode *pi, uint64_t size)
{
uint64_t new_max = (size + 1) << 1;
- uint64_t max_inc = g_conf->mds_client_writeable_range_max_inc_objs;
+ uint64_t max_inc = g_conf()->mds_client_writeable_range_max_inc_objs;
if (max_inc > 0) {
max_inc *= pi->layout.object_size;
new_max = std::min(new_max, size + max_inc);
mds->mdlog->get_current_segment()->touched_sessions.insert(session->info.inst.name);
if (session->get_num_trim_flushes_warnings() > 0 &&
- session->get_num_completed_flushes() * 2 < g_conf->mds_max_completed_flushes)
+ session->get_num_completed_flushes() * 2 < g_conf()->mds_max_completed_flushes)
session->reset_num_trim_flushes_warnings();
} else {
if (session->get_num_completed_flushes() >=
- (g_conf->mds_max_completed_flushes << session->get_num_trim_flushes_warnings())) {
+ (g_conf()->mds_max_completed_flushes << session->get_num_trim_flushes_warnings())) {
session->inc_num_trim_flushes_warnings();
stringstream ss;
ss << "client." << session->get_client() << " does not advance its oldest_flush_tid ("
  // snap inodes that need flushing are auth pinned; they affect
  // subtree/dirfrag freeze.
utime_t cutoff = now;
- cutoff -= g_conf->mds_freeze_tree_timeout / 3;
+ cutoff -= g_conf()->mds_freeze_tree_timeout / 3;
CInode *last = need_snapflush_inodes.back();
while (!need_snapflush_inodes.empty()) {
<< *lock << " " << *lock->get_parent() << dendl;
continue;
}
- if (now - lock->get_update_stamp() < g_conf->mds_scatter_nudge_interval)
+ if (now - lock->get_update_stamp() < g_conf()->mds_scatter_nudge_interval)
break;
updated_scatterlocks.pop_front();
scatter_nudge(lock, 0);
void MDBalancer::tick()
{
- static int num_bal_times = g_conf->mds_bal_max;
- auto bal_interval = mds->cct->_conf->get_val<int64_t>("mds_bal_interval");
- auto bal_max_until = mds->cct->_conf->get_val<int64_t>("mds_bal_max_until");
+ static int num_bal_times = g_conf()->mds_bal_max;
+ auto bal_interval = mds->cct->_conf.get_val<int64_t>("mds_bal_interval");
+ auto bal_max_until = mds->cct->_conf.get_val<int64_t>("mds_bal_max_until");
time now = clock::now();
- if (g_conf->mds_bal_export_pin) {
+ if (g_conf()->mds_bal_export_pin) {
handle_export_pins();
}
// sample?
if (chrono::duration<double>(now-last_sample).count() >
- g_conf->mds_bal_sample_interval) {
+ g_conf()->mds_bal_sample_interval) {
dout(15) << "tick last_sample now " << now << dendl;
last_sample = now;
}
double mds_load_t::mds_load() const
{
- switch(g_conf->mds_bal_mode) {
+ switch(g_conf()->mds_bal_mode) {
case 0:
return
.8 * auth.meta_load() +
/* timeout: if we waste half our time waiting for RADOS, then abort! */
lock.Lock();
- int ret_t = cond.WaitInterval(lock, utime_t(mds->cct->_conf->get_val<int64_t>("mds_bal_interval")/2, 0));
+ int ret_t = cond.WaitInterval(lock, utime_t(mds->cct->_conf.get_val<int64_t>("mds_bal_interval")/2, 0));
lock.Unlock();
/* success: store the balancer in memory and set the version. */
// Pass on to MDCache: note that the split might still not
// happen if the checks in MDCache::can_fragment fail.
dout(10) << __func__ << " splitting " << *split_dir << dendl;
- mds->mdcache->split_dir(split_dir, g_conf->mds_bal_split_bits);
+ mds->mdcache->split_dir(split_dir, g_conf()->mds_bal_split_bits);
};
bool is_new = false;
// Set a timer to really do the split: we don't do it immediately
// so that bursts of ops on a directory have a chance to go through
// before we freeze it.
- mds->timer.add_event_after(g_conf->mds_bal_fragment_interval,
+ mds->timer.add_event_after(g_conf()->mds_bal_fragment_interval,
new FunctionContext(callback));
}
}
if (merge_pending.count(frag) == 0) {
dout(20) << __func__ << " enqueued dir " << *dir << dendl;
merge_pending.insert(frag);
- mds->timer.add_event_after(g_conf->mds_bal_fragment_interval,
+ mds->timer.add_event_after(g_conf()->mds_bal_fragment_interval,
new FunctionContext(callback));
} else {
dout(20) << __func__ << " dir already in queue " << *dir << dendl;
{
balance_state_t state;
- if (g_conf->mds_thrash_exports) {
+ if (g_conf()->mds_thrash_exports) {
//we're going to randomly export to all the mds in the cluster
set<mds_rank_t> up_mds;
mds->get_mds_map()->get_up_mds_set(up_mds);
// under or over?
for (auto p : load_map) {
- if (p.first < target_load * (1.0 + g_conf->mds_bal_min_rebalance)) {
+ if (p.first < target_load * (1.0 + g_conf()->mds_bal_min_rebalance)) {
dout(5) << " mds." << p.second << " is underloaded or barely overloaded." << dendl;
mds_last_epoch_under_map[p.second] = beat_epoch;
}
void MDBalancer::try_rebalance(balance_state_t& state)
{
- if (g_conf->mds_thrash_exports) {
+ if (g_conf()->mds_thrash_exports) {
dout(5) << "mds_thrash is on; not performing standard rebalance operation!"
<< dendl;
return;
mds_rank_t from = diri->authority().first;
double pop = dir->pop_auth_subtree.meta_load();
- if (g_conf->mds_bal_idle_threshold > 0 &&
- pop < g_conf->mds_bal_idle_threshold &&
+ if (g_conf()->mds_bal_idle_threshold > 0 &&
+ pop < g_conf()->mds_bal_idle_threshold &&
diri != mds->mdcache->get_root() &&
from != mds->get_nodeid()) {
dout(5) << " exporting idle (" << pop << ") import " << *dir
assert(dir->is_auth());
double need = amount - have;
- if (need < amount * g_conf->mds_bal_min_start)
+ if (need < amount * g_conf()->mds_bal_min_start)
return; // good enough!
- double needmax = need * g_conf->mds_bal_need_max;
- double needmin = need * g_conf->mds_bal_need_min;
- double midchunk = need * g_conf->mds_bal_midchunk;
- double minchunk = need * g_conf->mds_bal_minchunk;
+ double needmax = need * g_conf()->mds_bal_need_max;
+ double needmin = need * g_conf()->mds_bal_need_min;
+ double midchunk = need * g_conf()->mds_bal_midchunk;
+ double minchunk = need * g_conf()->mds_bal_minchunk;
list<CDir*> bigger_rep, bigger_unrep;
multimap<double, CDir*> smaller;
void MDBalancer::maybe_fragment(CDir *dir, bool hot)
{
// split/merge
- if (mds->cct->_conf->get_val<bool>("mds_bal_fragment_dirs") &&
- g_conf->mds_bal_fragment_interval > 0 &&
+ if (mds->cct->_conf.get_val<bool>("mds_bal_fragment_dirs") &&
+ g_conf()->mds_bal_fragment_interval > 0 &&
!dir->inode->is_base() && // not root/base (for now at least)
dir->is_auth()) {
// split
- if (g_conf->mds_bal_split_size > 0 && (dir->should_split() || hot)) {
+ if (g_conf()->mds_bal_split_size > 0 && (dir->should_split() || hot)) {
if (split_pending.count(dir->dirfrag()) == 0) {
queue_split(dir, false);
} else {
// hit me
double v = dir->pop_me.get(type).hit(amount);
- const bool hot = (v > g_conf->mds_bal_split_rd && type == META_POP_IRD) ||
- (v > g_conf->mds_bal_split_wr && type == META_POP_IWR);
+ const bool hot = (v > g_conf()->mds_bal_split_rd && type == META_POP_IRD) ||
+ (v > g_conf()->mds_bal_split_wr && type == META_POP_IWR);
dout(20) << "hit_dir " << type << " pop is " << v << ", frag " << dir->get_frag()
<< " size " << dir->get_frag_size() << " " << dir->pop_me << dendl;
if (dir->is_auth() && !dir->is_ambiguous_auth()) {
if (!dir->is_rep() &&
- dir_pop >= g_conf->mds_bal_replicate_threshold) {
+ dir_pop >= g_conf()->mds_bal_replicate_threshold) {
// replicate
double rdp = dir->pop_me.get(META_POP_IRD).get();
rd_adj = rdp / mds->get_mds_map()->get_num_in_mds() - rdp;
if (dir->ino() != 1 &&
dir->is_rep() &&
- dir_pop < g_conf->mds_bal_unreplicate_threshold) {
+ dir_pop < g_conf()->mds_bal_unreplicate_threshold) {
// unreplicate
dout(5) << "unreplicating dir " << *dir << " pop " << dir_pop << dendl;
num_shadow_inodes = 0;
num_inodes_with_caps = 0;
- max_dir_commit_size = g_conf->mds_dir_max_commit_size ?
- (g_conf->mds_dir_max_commit_size << 20) :
- (0.9 *(g_conf->osd_max_write_size << 20));
+ max_dir_commit_size = g_conf()->mds_dir_max_commit_size ?
+ (g_conf()->mds_dir_max_commit_size << 20) :
+ (0.9 *(g_conf()->osd_max_write_size << 20));
discover_last_tid = 0;
open_ino_last_tid = 0;
bottom_lru.lru_set_midpoint(0);
- decayrate.set_halflife(g_conf->mds_decay_halflife);
+ decayrate.set_halflife(g_conf()->mds_decay_halflife);
did_shutdown_log_cap = false;
{
file_layout_t result = file_layout_t::get_default();
result.pool_id = mdsmap.get_metadata_pool();
- if (g_conf->mds_log_segment_size > 0) {
- result.object_size = g_conf->mds_log_segment_size;
- result.stripe_unit = g_conf->mds_log_segment_size;
+ if (g_conf()->mds_log_segment_size > 0) {
+ result.object_size = g_conf()->mds_log_segment_size;
+ result.stripe_unit = g_conf()->mds_log_segment_size;
}
return result;
}
memset(&in->inode.dir_layout, 0, sizeof(in->inode.dir_layout));
if (in->inode.is_dir()) {
- in->inode.dir_layout.dl_dir_hash = g_conf->mds_default_dir_hash;
+ in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
++in->inode.rstat.rsubdirs;
} else {
in->inode.layout = default_file_layout;
CInode *MDCache::create_root_inode()
{
CInode *i = create_system_inode(MDS_INO_ROOT, S_IFDIR|0755);
- i->inode.uid = g_conf->mds_root_ino_uid;
- i->inode.gid = g_conf->mds_root_ino_gid;
+ i->inode.uid = g_conf()->mds_root_ino_uid;
+ i->inode.gid = g_conf()->mds_root_ino_gid;
i->inode.layout = default_file_layout;
i->inode.layout.pool_id = mds->mdsmap->get_first_data_pool();
return i;
linkunlink, update);
}
- if (g_conf->mds_snap_rstat) {
+ if (g_conf()->mds_snap_rstat) {
for (const auto &p : cur->dirty_old_rstats) {
auto &old = cur->old_inodes[p];
snapid_t ofirst = std::max(old.first, floor);
snapid_t first;
fnode_t *pf = parent->get_projected_fnode();
if (last == CEPH_NOSNAP) {
- if (g_conf->mds_snap_rstat)
+ if (g_conf()->mds_snap_rstat)
first = std::max(ofirst, parent->first);
else
first = parent->first;
parent->dirty_old_rstat[first-1].accounted_rstat = pf->accounted_rstat;
}
parent->first = first;
- } else if (!g_conf->mds_snap_rstat) {
+ } else if (!g_conf()->mds_snap_rstat) {
// drop snapshots' rstats
break;
} else if (last >= parent->first) {
// delay propagating until later?
if (!stop && !first &&
- g_conf->mds_dirstat_min_interval > 0) {
+ g_conf()->mds_dirstat_min_interval > 0) {
double since_last_prop = mut->get_mds_stamp() - pin->last_dirstat_prop;
- if (since_last_prop < g_conf->mds_dirstat_min_interval) {
+ if (since_last_prop < g_conf()->mds_dirstat_min_interval) {
dout(10) << "predirty_journal_parents last prop " << since_last_prop
- << " < " << g_conf->mds_dirstat_min_interval
+ << " < " << g_conf()->mds_dirstat_min_interval
<< ", stopping" << dendl;
stop = true;
} else {
if (parent->get_frag() == frag_t()) { // i.e., we are the only frag
if (pi.inode.dirstat.size() < 0)
- assert(!"negative dirstat size" == g_conf->mds_verify_scatter);
+ assert(!"negative dirstat size" == g_conf()->mds_verify_scatter);
if (pi.inode.dirstat.size() != pf->fragstat.size()) {
mds->clog->error() << "unmatched fragstat size on single dirfrag "
<< parent->dirfrag() << ", inode has " << pi.inode.dirstat
// trust the dirfrag for now
pi.inode.dirstat = pf->fragstat;
- assert(!"unmatched fragstat size" == g_conf->mds_verify_scatter);
+ assert(!"unmatched fragstat size" == g_conf()->mds_verify_scatter);
}
}
}
// first, if the frag is stale, bring it back in sync.
parent->resync_accounted_rstat();
- if (g_conf->mds_snap_rstat) {
+ if (g_conf()->mds_snap_rstat) {
for (auto &p : parent->dirty_old_rstat) {
project_rstat_frag_to_inode(p.second.rstat, p.second.accounted_rstat, p.second.first,
p.first, pin, true);
// trust the dirfrag for now
pi.inode.rstat = pf->rstat;
- assert(!"unmatched rstat rbytes" == g_conf->mds_verify_scatter);
+ assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter);
}
}
if (!in->is_dir()) {
assert(in->state_test(CInode::STATE_REJOINUNDEF));
in->inode.mode = S_IFDIR;
- in->inode.dir_layout.dl_dir_hash = g_conf->mds_default_dir_hash;
+ in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
}
CDir *dir = in->get_or_open_dirfrag(this, df.frag);
dir->state_set(CDir::STATE_REJOINUNDEF);
diri = new CInode(this, false);
diri->inode.ino = p->first.ino;
diri->inode.mode = S_IFDIR;
- diri->inode.dir_layout.dl_dir_hash = g_conf->mds_default_dir_hash;
+ diri->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
add_inode(diri);
if (MDS_INO_MDSDIR(from) == p->first.ino) {
diri->inode_auth = mds_authority_t(from, CDIR_AUTH_UNKNOWN);
in = new CInode(this, false, q->second.first, q->first.snapid);
in->inode.ino = q->second.ino;
in->inode.mode = S_IFDIR;
- in->inode.dir_layout.dl_dir_hash = g_conf->mds_default_dir_hash;
+ in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
add_inode(in);
dout(10) << " add inode " << *in << dendl;
} else if (in->get_parent_dn()) {
rejoin_undef_inodes.erase(in);
if (in->is_dir()) {
// FIXME: re-hash dentries if necessary
- assert(in->inode.dir_layout.dl_dir_hash == g_conf->mds_default_dir_hash);
+ assert(in->inode.dir_layout.dl_dir_hash == g_conf()->mds_default_dir_hash);
if (in->has_dirfrags() && !in->dirfragtree.is_leaf(frag_t())) {
CDir *dir = in->get_dirfrag(frag_t());
assert(dir);
// cache
char old_val[32] = { 0 };
char *o = old_val;
- g_conf->get_val("debug_mds", &o, sizeof(old_val));
- g_conf->set_val("debug_mds", "10");
- g_conf->apply_changes(NULL);
+ g_conf().get_val("debug_mds", &o, sizeof(old_val));
+ g_conf().set_val("debug_mds", "10");
+ g_conf().apply_changes(NULL);
show_cache();
- g_conf->set_val("debug_mds", old_val);
- g_conf->apply_changes(NULL);
- mds->timer.add_event_after(g_conf->mds_shutdown_check, new C_MDC_ShutdownCheck(this));
+ g_conf().set_val("debug_mds", old_val);
+ g_conf().apply_changes(NULL);
+ mds->timer.add_event_after(g_conf()->mds_shutdown_check, new C_MDC_ShutdownCheck(this));
// this
dout(0) << "lru size now " << lru.lru_get_size() << "/" << bottom_lru.lru_get_size() << dendl;
{
dout(2) << "shutdown_start" << dendl;
- if (g_conf->mds_shutdown_check)
- mds->timer.add_event_after(g_conf->mds_shutdown_check, new C_MDC_ShutdownCheck(this));
+ if (g_conf()->mds_shutdown_check)
+ mds->timer.add_event_after(g_conf()->mds_shutdown_check, new C_MDC_ShutdownCheck(this));
- // g_conf->debug_mds = 10;
+ // g_conf()->debug_mds = 10;
}
// see comment in Migrator::find_stale_export_freeze()
utime_t now = ceph_clock_now();
utime_t cutoff = now;
- cutoff -= g_conf->mds_freeze_tree_timeout;
+ cutoff -= g_conf()->mds_freeze_tree_timeout;
for (map<dirfrag_t,fragment_info_t>::iterator p = fragments.begin();
p != fragments.end(); ) {
list<MDSInternalContextBase*> waiters;
adjust_dir_fragments(diri, info.dirs, basedirfrag.frag, info.bits,
info.resultfrags, waiters, false);
- if (g_conf->mds_debug_frag)
+ if (g_conf()->mds_debug_frag)
diri->verify_dirfrags();
mds->queue_waiters(waiters);
list<MDSInternalContextBase*> waiters;
list<CDir*> resultfrags;
adjust_dir_fragments(diri, base, bits, resultfrags, waiters, false);
- if (g_conf->mds_debug_frag)
+ if (g_conf()->mds_debug_frag)
diri->verify_dirfrags();
for (list<CDir*>::iterator p = resultfrags.begin(); p != resultfrags.end(); ++p)
ls->dirty_dirfrag_dirfragtree.push_back(&diri->item_dirty_dirfrag_dirfragtree);
}
- if (g_conf->mds_debug_frag)
+ if (g_conf()->mds_debug_frag)
diri->verify_dirfrags();
for (list<frag_t>::iterator q = old_frags.begin(); q != old_frags.end(); ++q)
void MDCache::show_subtrees(int dbl)
{
- if (g_conf->mds_thrash_exports)
+ if (g_conf()->mds_thrash_exports)
dbl += 15;
//dout(10) << "show_subtrees" << dendl;
- if (!g_conf->subsys.should_gather(ceph_subsys_mds, dbl))
+ if (!g_conf()->subsys.should_gather(ceph_subsys_mds, dbl))
return; // i won't print anything.
if (subtrees.empty()) {
public:
static uint64_t cache_limit_inodes(void) {
- return g_conf->get_val<int64_t>("mds_cache_size");
+ return g_conf().get_val<int64_t>("mds_cache_size");
}
static uint64_t cache_limit_memory(void) {
- return g_conf->get_val<uint64_t>("mds_cache_memory_limit");
+ return g_conf().get_val<uint64_t>("mds_cache_memory_limit");
}
static double cache_reservation(void) {
- return g_conf->get_val<double>("mds_cache_reservation");
+ return g_conf().get_val<double>("mds_cache_reservation");
}
static double cache_mid(void) {
- return g_conf->get_val<double>("mds_cache_mid");
+ return g_conf().get_val<double>("mds_cache_mid");
}
static double cache_health_threshold(void) {
- return g_conf->get_val<double>("mds_health_cache_threshold");
+ return g_conf().get_val<double>("mds_health_cache_threshold");
}
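Note on the pattern in the hunks above and below: most of this patch follows one mechanical rule — `g_conf` is no longer dereferenced as a bare pointer but called as a function that returns a config proxy, so plain option reads keep the arrow form (`g_conf()->mds_cache_mid`) while typed lookups move to the dot form (`g_conf().get_val<double>("mds_cache_mid")`), and `cct->_conf->` method calls likewise become `cct->_conf.`. A minimal, self-contained sketch of a proxy with that dual interface follows; the `ToyConfig`/`ToyProxy` names are invented for illustration and are not the types this patch actually touches.

#include <cassert>
#include <string>

// Toy stand-ins, not Ceph's md_config_t / ConfigProxy.
struct ToyConfig {
  double mds_cache_mid = 0.7;

  template <typename T>
  T get_val(const std::string& key) const {
    // a single hard-coded option keeps the sketch short
    return key == "mds_cache_mid" ? static_cast<T>(mds_cache_mid) : T{};
  }
};

class ToyProxy {
  ToyConfig config;
public:
  const ToyConfig* operator->() const { return &config; }   // g_conf()->member
  template <typename T>
  T get_val(const std::string& key) const {                 // g_conf().get_val<T>("...")
    return config.get_val<T>(key);
  }
};

ToyProxy& g_conf() {   // a function call now stands where the old global pointer was
  static ToyProxy proxy;
  return proxy;
}

int main() {
  assert(g_conf()->mds_cache_mid == g_conf().get_val<double>("mds_cache_mid"));
  return 0;
}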
double cache_toofull_ratio(void) const {
uint64_t inode_limit = cache_limit_inodes();
void finish(int r) override {
MDSRank *mds = get_mds();
// assume journal is reliable, so don't choose action based on
- // g_conf->mds_action_on_write_error.
+ // g_conf()->mds_action_on_write_error.
if (r == -EBLACKLISTED) {
derr << "we have been blacklisted (fenced), respawning..." << dendl;
mds->respawn();
assert(journaler->is_readonly());
journaler->set_write_error_handler(new C_MDL_WriteError(this));
journaler->set_writeable();
- journaler->create(&mds->mdcache->default_log_layout, g_conf->mds_journal_format);
+ journaler->create(&mds->mdcache->default_log_layout, g_conf()->mds_journal_format);
journaler->write_head(gather.new_sub());
// Async write JournalPointer to RADOS
// disambiguate imports. Because the ESubtreeMap reflects the subtree
// state when all EImportFinish events are replayed.
} else if (ls->end/period != ls->offset/period ||
- ls->num_events >= g_conf->mds_log_events_per_segment) {
+ ls->num_events >= g_conf()->mds_log_events_per_segment) {
dout(10) << "submit_entry also starting new segment: last = "
<< ls->seq << "/" << ls->offset << ", event seq = " << event_seq << dendl;
_start_new_segment();
- } else if (g_conf->mds_debug_subtrees &&
+ } else if (g_conf()->mds_debug_subtrees &&
le->get_type() != EVENT_SUBTREEMAP_TEST) {
// debug: journal this every time to catch subtree replay bugs.
// use a different event id so it doesn't get interpreted as a
submit_mutex.Lock();
while (!mds->is_daemon_stopping()) {
- if (g_conf->mds_log_pause) {
+ if (g_conf()->mds_log_pause) {
submit_cond.Wait(submit_mutex);
continue;
}
void MDLog::trim(int m)
{
- unsigned max_segments = g_conf->mds_log_max_segments;
- int max_events = g_conf->mds_log_max_events;
+ unsigned max_segments = g_conf()->mds_log_max_segments;
+ int max_events = g_conf()->mds_log_max_events;
if (m >= 0)
max_events = m;
}
// Clamp max_events to not be smaller than events per segment
- if (max_events > 0 && max_events <= g_conf->mds_log_events_per_segment) {
- max_events = g_conf->mds_log_events_per_segment + 1;
+ if (max_events > 0 && max_events <= g_conf()->mds_log_events_per_segment) {
+ max_events = g_conf()->mds_log_events_per_segment + 1;
}
submit_mutex.Lock();
void MDLog::_recovery_thread(MDSInternalContextBase *completion)
{
assert(journaler == NULL);
- if (g_conf->mds_journal_format > JOURNAL_FORMAT_MAX) {
+ if (g_conf()->mds_journal_format > JOURNAL_FORMAT_MAX) {
dout(0) << "Configuration value for mds_journal_format is out of bounds, max is "
<< JOURNAL_FORMAT_MAX << dendl;
}
completion->complete(-EINVAL);
}
- } else if (mds->is_standby_replay() || front_journal->get_stream_format() >= g_conf->mds_journal_format) {
+ } else if (mds->is_standby_replay() || front_journal->get_stream_format() >= g_conf()->mds_journal_format) {
/* The journal is of configured format, or we are in standbyreplay and will
* tolerate replaying old journals until we have to go active. Use front_journal as
* our journaler attribute and complete */
dout(4) << "Writing new journal header " << jp.back << dendl;
file_layout_t new_layout = old_journal->get_layout();
new_journal->set_writeable();
- new_journal->create(&new_layout, g_conf->mds_journal_format);
+ new_journal->create(&new_layout, g_conf()->mds_journal_format);
/* Write the new journal header to RADOS */
C_SaferCond write_head_wait;
mds->clog->error() << "corrupt journal event at " << pos << "~"
<< bl.length() << " / "
<< journaler->get_write_pos();
- if (g_conf->mds_log_skip_corrupt_events) {
+ if (g_conf()->mds_log_skip_corrupt_events) {
continue;
} else {
mds->damaged_unlocked();
return KEYS;
}
-void MDSDaemon::handle_conf_change(const md_config_t *conf,
+void MDSDaemon::handle_conf_change(const md_config_t *mconf,
const std::set <std::string> &changed)
{
// We may be called within mds_lock (via `tell`) or outwith the
mds_lock.Lock();
}
+ ConfigReader conf{mconf};
if (changed.count("mds_op_complaint_time") ||
changed.count("mds_op_log_threshold")) {
if (mds_rank) {
}
}
- if (!g_conf->mds_log_pause && changed.count("mds_log_pause")) {
+ if (!g_conf()->mds_log_pause && changed.count("mds_log_pause")) {
if (mds_rank) {
mds_rank->mdlog->kick_submitter();
}
}
if (mds_rank) {
- mds_rank->handle_conf_change(conf, changed);
+ mds_rank->handle_conf_change(mconf, changed);
}
if (!initially_locked) {
int rotating_auth_attempts = 0;
while (monc->wait_auth_rotating(30.0) < 0) {
- if (++rotating_auth_attempts <= g_conf->max_rotating_auth_attempts) {
+ if (++rotating_auth_attempts <= g_conf()->max_rotating_auth_attempts) {
derr << "unable to obtain rotating service keys; retrying" << dendl;
continue;
}
// Set up admin socket before taking mds_lock, so that ordering
// is consistent (later we take mds_lock within asok callbacks)
set_up_admin_socket();
- g_conf->add_observer(this);
+ g_conf().add_observer(this);
mds_lock.Lock();
if (beacon.get_want_state() == MDSMap::STATE_DNE) {
suicide(); // we could do something more graceful here
// schedule
tick_event = timer.add_event_after(
- g_conf->mds_tick_interval,
+ g_conf()->mds_tick_interval,
new FunctionContext([this](int) {
assert(mds_lock.is_locked_by_me());
tick();
string args = argsvec.front();
for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
args += " " + *a;
- r = cct->_conf->injectargs(args, &ss);
+ r = cct->_conf.injectargs(args, &ss);
} else if (prefix == "config set") {
std::string key;
cmd_getval(cct, cmdmap, "key", key);
std::string val;
cmd_getval(cct, cmdmap, "value", val);
- r = cct->_conf->set_val(key, val, &ss);
+ r = cct->_conf.set_val(key, val, &ss);
if (r == 0) {
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
}
} else if (prefix == "config unset") {
std::string key;
cmd_getval(cct, cmdmap, "key", key);
- r = cct->_conf->rm_val(key);
+ r = cct->_conf.rm_val(key);
if (r == 0) {
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
}
if (r == -ENOENT) {
r = 0; // idempotent
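In the `config set`/`config unset` admin-command handlers above, only the spelling changes (`cct->_conf.` instead of `cct->_conf->`); the flow is unchanged: stage the value with `set_val`/`rm_val`, then `apply_changes(nullptr)`, with a missing key on unset mapped from -ENOENT to success. A small self-contained sketch of that call sequence, using an invented `ToyConf` rather than the real config class:

#include <cerrno>
#include <iostream>
#include <map>
#include <string>

// Toy config store; only the set/rm/apply call sequence is the point here.
struct ToyConf {
  std::map<std::string, std::string> pending;
  std::map<std::string, std::string> active;

  int set_val(const std::string& key, const std::string& val) {
    pending[key] = val;
    return 0;
  }
  int rm_val(const std::string& key) {
    return pending.erase(key) ? 0 : -ENOENT;
  }
  void apply_changes(std::ostream*) {   // observers would be notified here
    active = pending;
  }
};

int main() {
  ToyConf conf;

  // "config set": stage the value, then apply it
  int r = conf.set_val("debug_mds", "10");
  if (r == 0)
    conf.apply_changes(nullptr);

  // "config unset" of a missing key: -ENOENT is treated as success (idempotent)
  r = conf.rm_val("no_such_key");
  if (r == -ENOENT)
    r = 0;
  conf.apply_changes(nullptr);

  std::cout << "debug_mds=" << conf.active["debug_mds"] << ", r=" << r << "\n";
  return 0;
}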
bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
assert(got);
bool killed = mds_rank->evict_client(session_id, false,
- g_conf->mds_session_blacklist_on_evict,
+ g_conf()->mds_session_blacklist_on_evict,
ss);
if (!killed)
r = -ENOENT;
const auto myid = monc->get_global_id();
// We have entered a rank-holding state, we shouldn't be back
// here!
- if (g_conf->mds_enforce_unique_name) {
+ if (g_conf()->mds_enforce_unique_name) {
if (mds_gid_t existing = mdsmap->find_mds_gid_by_name(name)) {
const MDSMap::mds_info_t& i = mdsmap->get_info_gid(existing);
if (i.global_id > myid) {
//because add_observer is called after set_up_admin_socket
//so we can use asok_hook to avoid assert in the remove_observer
if (asok_hook != NULL)
- g_conf->remove_observer(this);
+ g_conf().remove_observer(this);
clean_up_admin_socket();
damage_table(whoami_),
inotable(NULL), snapserver(NULL), snapclient(NULL),
sessionmap(this), logger(NULL), mlogger(NULL),
- op_tracker(g_ceph_context, g_conf->mds_enable_op_tracker,
- g_conf->osd_num_op_tracker_shard),
+ op_tracker(g_ceph_context, g_conf()->mds_enable_op_tracker,
+ g_conf()->osd_num_op_tracker_shard),
last_state(MDSMap::STATE_BOOT),
state(MDSMap::STATE_BOOT),
cluster_degraded(false), stopping(false),
void MDSRank::hit_export_target(mds_rank_t rank, double amount)
{
- double rate = g_conf->mds_bal_target_decay;
+ double rate = g_conf()->mds_bal_target_decay;
if (amount < 0.0) {
- amount = 100.0/g_conf->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
+ amount = 100.0/g_conf()->mds_bal_target_decay; /* a good default for "i am trying to keep this export_target active" */
}
auto em = export_targets.emplace(std::piecewise_construct, std::forward_as_tuple(rank), std::forward_as_tuple(DecayRate(rate)));
auto &counter = em.first->second;
beacon.set_want_state(mdsmap, MDSMap::STATE_DAMAGED);
monc->flush_log(); // Flush any clog error from before we were called
beacon.notify_health(this); // Include latest status in our swan song
- beacon.send_and_wait(g_conf->mds_mon_shutdown_timeout);
+ beacon.send_and_wait(g_conf()->mds_mon_shutdown_timeout);
// It's okay if we timed out and the mon didn't get our beacon, because
// another daemon (or ourselves after respawn) will eventually take the
return;
}
- if (g_conf->mds_action_on_write_error >= 2) {
+ if (g_conf()->mds_action_on_write_error >= 2) {
derr << "unhandled write error " << cpp_strerror(err) << ", suicide..." << dendl;
respawn();
- } else if (g_conf->mds_action_on_write_error == 1) {
+ } else if (g_conf()->mds_action_on_write_error == 1) {
derr << "unhandled write error " << cpp_strerror(err) << ", force readonly..." << dendl;
mdcache->force_readonly();
} else {
/*double el = now - start;
if (el > 30.0 &&
el < 60.0)*/
- for (int i=0; i<g_conf->mds_thrash_exports; i++) {
+ for (int i=0; i<g_conf()->mds_thrash_exports; i++) {
set<mds_rank_t> s;
if (!is_active()) break;
mdsmap->get_mds_set(s, MDSMap::STATE_ACTIVE);
if (s.size() < 2 || CInode::count() < 10)
break; // need peers for this to work.
- if (mdcache->migrator->get_num_exporting() > g_conf->mds_thrash_exports * 5 ||
- mdcache->migrator->get_export_queue_size() > g_conf->mds_thrash_exports * 10)
+ if (mdcache->migrator->get_num_exporting() > g_conf()->mds_thrash_exports * 5 ||
+ mdcache->migrator->get_export_queue_size() > g_conf()->mds_thrash_exports * 10)
break;
- dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf->mds_thrash_exports << dendl;
+ dout(7) << "mds thrashing exports pass " << (i+1) << "/" << g_conf()->mds_thrash_exports << dendl;
// pick a random dir inode
CInode *in = mdcache->hack_pick_random_inode();
}
}
// hack: thrash fragments
- for (int i=0; i<g_conf->mds_thrash_fragments; i++) {
+ for (int i=0; i<g_conf()->mds_thrash_fragments; i++) {
if (!is_active()) break;
- if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf->mds_thrash_fragments) break;
- dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf->mds_thrash_fragments << dendl;
+ if (mdcache->get_num_fragmenting_dirs() > 5 * g_conf()->mds_thrash_fragments) break;
+ dout(7) << "mds thrashing fragments pass " << (i+1) << "/" << g_conf()->mds_thrash_fragments << dendl;
// pick a random dir inode
CInode *in = mdcache->hack_pick_random_inode();
// NB not enabling suicide grace, because the mon takes care of killing us
// (by blacklisting us) when we fail to send beacons, and it's simpler to
// only have one way of dying.
- g_ceph_context->get_heartbeat_map()->reset_timeout(hb, g_conf->mds_beacon_grace, 0);
+ g_ceph_context->get_heartbeat_map()->reset_timeout(hb, g_conf()->mds_beacon_grace, 0);
}
bool MDSRank::is_stale_message(Message *m) const
// The replay was done in standby state, and we are still in that state
assert(standby_replaying);
dout(10) << "setting replay timer" << dendl;
- timer.add_event_after(g_conf->mds_replay_interval,
+ timer.add_event_after(g_conf()->mds_replay_interval,
new C_MDS_StandbyReplayRestart(this));
return;
} else if (standby_replaying) {
assert(!is_standby_replay());
// Reformat and come back here
- if (mdlog->get_journaler()->get_stream_format() < g_conf->mds_journal_format) {
+ if (mdlog->get_journaler()->get_stream_format() < g_conf()->mds_journal_format) {
dout(4) << "reformatting journal on standbyreplay->replay transition" << dendl;
mdlog->reopen(new C_MDS_BootStart(this, MDS_BOOT_REPLAY_DONE));
return;
snapserver->save(new C_MDSInternalNoop);
}
- if (g_conf->mds_wipe_sessions) {
+ if (g_conf()->mds_wipe_sessions) {
dout(1) << "wiping out client sessions" << dendl;
sessionmap.wipe();
sessionmap.save(new C_MDSInternalNoop);
}
- if (g_conf->mds_wipe_ino_prealloc) {
+ if (g_conf()->mds_wipe_ino_prealloc) {
dout(1) << "wiping out ino prealloc from sessions" << dendl;
sessionmap.wipe_ino_prealloc();
sessionmap.save(new C_MDSInternalNoop);
}
- if (g_conf->mds_skip_ino) {
- inodeno_t i = g_conf->mds_skip_ino;
+ if (g_conf()->mds_skip_ino) {
+ inodeno_t i = g_conf()->mds_skip_ino;
dout(1) << "skipping " << i << " inodes" << dendl;
inotable->skip_inos(i);
inotable->save(new C_MDSInternalNoop);
snapserver->save(fin.new_sub());
}
- assert(g_conf->mds_kill_create_at != 1);
+ assert(g_conf()->mds_kill_create_at != 1);
// ok now journal it
mdlog->journal_segment_subtree_map(fin.new_sub());
for (const auto &s : victims) {
std::stringstream ss;
evict_client(s->get_client().v, false,
- g_conf->mds_session_blacklist_on_evict, ss, gather.new_sub());
+ g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
}
gather.activate();
}
objecter->set_client_incarnation(incarnation);
// for debug
- if (g_conf->mds_dump_cache_on_map)
+ if (g_conf()->mds_dump_cache_on_map)
mdcache->dump_cache();
// did it change?
rejoin_joint_start();
// did we finish?
- if (g_conf->mds_dump_cache_after_rejoin &&
+ if (g_conf()->mds_dump_cache_after_rejoin &&
oldmap->is_rejoining() && !mdsmap->is_rejoining())
mdcache->dump_cache(); // for DEBUG only
mds_lock.Lock();
std::stringstream dss;
bool evicted = evict_client(strtol(client_id.c_str(), 0, 10), true,
- g_conf->mds_session_blacklist_on_evict, dss);
+ g_conf()->mds_session_blacklist_on_evict, dss);
if (!evicted) {
dout(15) << dss.str() << dendl;
ss << dss.str();
for (const auto s : victims) {
std::stringstream ss;
evict_client(s->get_client().v, false,
- g_conf->mds_session_blacklist_on_evict, ss, gather.new_sub());
+ g_conf()->mds_session_blacklist_on_evict, ss, gather.new_sub());
}
gather.activate();
}
break;
case TABLESERVER_OP_NOTIFY_PREP:
- assert(g_conf->mds_kill_mdstable_at != 9);
+ assert(g_conf()->mds_kill_mdstable_at != 9);
handle_notify_prep(m);
break;
if (pending_prepare.count(reqid)) {
dout(10) << "got agree on " << reqid << " atid " << tid << dendl;
- assert(g_conf->mds_kill_mdstable_at != 3);
+ assert(g_conf()->mds_kill_mdstable_at != 3);
MDSInternalContextBase *onfinish = pending_prepare[reqid].onfinish;
*pending_prepare[reqid].ptid = tid;
pending_commit[tid]->pending_commit_tids[table].count(tid)) {
dout(10) << "got ack on tid " << tid << ", logging" << dendl;
- assert(g_conf->mds_kill_mdstable_at != 7);
+ assert(g_conf()->mds_kill_mdstable_at != 7);
// remove from committing list
pending_commit[tid]->pending_commit_tids[table].erase(tid);
notify_commit(tid);
- assert(g_conf->mds_kill_mdstable_at != 4);
+ assert(g_conf()->mds_kill_mdstable_at != 4);
if (server_ready) {
// send message
dout(7) << "handle_prepare " << *req << dendl;
mds_rank_t from = mds_rank_t(req->get_source().num());
- assert(g_conf->mds_kill_mdstable_at != 1);
+ assert(g_conf()->mds_kill_mdstable_at != 1);
projected_version++;
dout(7) << "_create_logged " << *req << " tid " << tid << dendl;
mds_rank_t from = mds_rank_t(req->get_source().num());
- assert(g_conf->mds_kill_mdstable_at != 2);
+ assert(g_conf()->mds_kill_mdstable_at != 2);
_note_prepare(from, req->reqid);
_prepare(req->bl, req->reqid, from);
return;
}
- assert(g_conf->mds_kill_mdstable_at != 5);
+ assert(g_conf()->mds_kill_mdstable_at != 5);
projected_version++;
committing_tids.insert(tid);
{
dout(7) << "_commit_logged, sending ACK" << dendl;
- assert(g_conf->mds_kill_mdstable_at != 6);
+ assert(g_conf()->mds_kill_mdstable_at != 6);
version_t tid = req->get_tid();
pending_for_mds.erase(tid);
{
dout(7) << "handle_rollback " << *req << dendl;
- assert(g_conf->mds_kill_mdstable_at != 8);
+ assert(g_conf()->mds_kill_mdstable_at != 8);
version_t tid = req->get_tid();
assert(pending_for_mds.count(tid));
assert(!committing_tids.count(tid));
{
utime_t now = ceph_clock_now();
utime_t cutoff = now;
- cutoff -= g_conf->mds_freeze_tree_timeout;
+ cutoff -= g_conf()->mds_freeze_tree_timeout;
/*
// notify bystanders ; wait in aborting state
q->second.state = IMPORT_ABORTING;
import_notify_abort(dir, bounds);
- assert(g_conf->mds_kill_import_at != 10);
+ assert(g_conf()->mds_kill_import_at != 10);
}
break;
void Migrator::audit()
{
- if (!g_conf->subsys.should_gather<ceph_subsys_mds, 5>())
+ if (!g_conf()->subsys.should_gather<ceph_subsys_mds, 5>())
return; // hrm.
// import_state
return;
}
- if (g_conf->mds_thrash_exports) {
+ if (g_conf()->mds_thrash_exports) {
// create random subtree bound (which will not be exported)
list<CDir*> ls;
for (auto p = dir->begin(); p != dir->end(); ++p) {
return;
}
- assert(g_conf->mds_kill_export_at != 1);
+ assert(g_conf()->mds_kill_export_at != 1);
it->second.state = EXPORT_DISCOVERING;
// send ExportDirDiscover (ask target)
mds->get_nodeid(),
it->second.tid);
mds->send_message_mds(discover, dest);
- assert(g_conf->mds_kill_export_at != 2);
+ assert(g_conf()->mds_kill_export_at != 2);
it->second.last_cum_auth_pins_change = ceph_clock_now();
// freeze the subtree
it->second.state = EXPORT_FREEZING;
dir->auth_unpin(this);
- assert(g_conf->mds_kill_export_at != 3);
+ assert(g_conf()->mds_kill_export_at != 3);
} else {
dout(7) << "peer failed to discover (not active?), canceling" << dendl;
// send.
it->second.state = EXPORT_PREPPING;
mds->send_message_mds(prep, it->second.peer);
- assert (g_conf->mds_kill_export_at != 4);
+ assert (g_conf()->mds_kill_export_at != 4);
// make sure any new instantiations of caps are flushed out
assert(it->second.warning_ack_waiting.empty());
const unsigned link_size = 10;
const unsigned null_size = 1;
- uint64_t max_size = g_conf->get_val<uint64_t>("mds_max_export_size");
+ uint64_t max_size = g_conf().get_val<uint64_t>("mds_max_export_size");
uint64_t approx_size = 0;
list<CDir*> dfs;
return;
}
- assert (g_conf->mds_kill_export_at != 5);
+ assert (g_conf()->mds_kill_export_at != 5);
// send warnings
set<CDir*> bounds;
cache->get_subtree_bounds(dir, bounds);
it->second.state = EXPORT_WARNING;
- assert(g_conf->mds_kill_export_at != 6);
+ assert(g_conf()->mds_kill_export_at != 6);
// nobody to warn?
if (it->second.warning_ack_waiting.empty())
export_go(dir); // start export.
cache->show_subtrees();
it->second.state = EXPORT_EXPORTING;
- assert(g_conf->mds_kill_export_at != 7);
+ assert(g_conf()->mds_kill_export_at != 7);
assert(dir->is_frozen_tree_root());
assert(dir->get_cum_auth_pins() == 0);
// send
mds->send_message_mds(req, dest);
- assert(g_conf->mds_kill_export_at != 8);
+ assert(g_conf()->mds_kill_export_at != 8);
mds->hit_export_target(dest, num_exported_inodes+1);
decode(it->second.peer_imported, bp);
it->second.state = EXPORT_LOGGINGFINISH;
- assert (g_conf->mds_kill_export_at != 9);
+ assert (g_conf()->mds_kill_export_at != 9);
set<CDir*> bounds;
cache->get_subtree_bounds(dir, bounds);
// log export completion, then finish (unfreeze, trigger finish context, etc.)
mds->mdlog->submit_entry(le, new C_MDS_ExportFinishLogged(this, dir));
mds->mdlog->flush();
- assert (g_conf->mds_kill_export_at != 10);
+ assert (g_conf()->mds_kill_export_at != 10);
m->put();
}
// wait for notifyacks
stat.state = EXPORT_NOTIFYING;
- assert (g_conf->mds_kill_export_at != 11);
+ assert (g_conf()->mds_kill_export_at != 11);
// no notifies to wait for?
if (stat.notify_ack_waiting.empty()) {
{
dout(5) << "export_finish " << *dir << dendl;
- assert (g_conf->mds_kill_export_at != 12);
+ assert (g_conf()->mds_kill_export_at != 12);
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
if (it == export_state.end()) {
dout(7) << "target must have failed, not sending final commit message. export succeeded anyway." << dendl;
} else {
dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl;
}
- assert(g_conf->mds_kill_export_at != 13);
+ assert(g_conf()->mds_kill_export_at != 13);
// finish export (adjust local cache state)
int num_dentries = 0;
return;
}
- assert (g_conf->mds_kill_import_at != 1);
+ assert (g_conf()->mds_kill_import_at != 1);
// do we have it?
CInode *in = cache->get_inode(m->get_dirfrag().ino);
dout(7) << " sending export_discover_ack on " << *in << dendl;
mds->send_message_mds(new MExportDirDiscoverAck(df, m->get_tid()), p_state->peer);
m->put();
- assert (g_conf->mds_kill_import_at != 2);
+ assert (g_conf()->mds_kill_import_at != 2);
}
void Migrator::import_reverse_discovering(dirfrag_t df)
it->second.state = IMPORT_PREPPING;
it->second.bound_ls = m->get_bounds();
it->second.bystanders = m->get_bystanders();
- assert(g_conf->mds_kill_import_at != 3);
+ assert(g_conf()->mds_kill_import_at != 3);
// bystander list
dout(7) << "bystanders are " << it->second.bystanders << dendl;
dout(7) << " sending export_prep_ack on " << *dir << dendl;
mds->send_message(new MExportDirPrepAck(dir->dirfrag(), success, m->get_tid()), m->get_connection());
- assert(g_conf->mds_kill_import_at != 4);
+ assert(g_conf()->mds_kill_import_at != 4);
// done
m->put();
}
/* This function DOES put the passed message before returning*/
void Migrator::handle_export_dir(MExportDir *m)
{
- assert (g_conf->mds_kill_import_at != 5);
+ assert (g_conf()->mds_kill_import_at != 5);
CDir *dir = cache->get_dirfrag(m->dirfrag);
assert(dir);
// note state
it->second.state = IMPORT_LOGGINGSTART;
- assert (g_conf->mds_kill_import_at != 6);
+ assert (g_conf()->mds_kill_import_at != 6);
// log it
mds->mdlog->submit_entry(le, onlogged);
dout(7) << "no bystanders, finishing reverse now" << dendl;
import_reverse_unfreeze(dir);
} else {
- assert (g_conf->mds_kill_import_at != 10);
+ assert (g_conf()->mds_kill_import_at != 10);
}
}
// note state
it->second.state = IMPORT_ACKING;
- assert (g_conf->mds_kill_import_at != 7);
+ assert (g_conf()->mds_kill_import_at != 7);
// force open client sessions and finish cap import
mds->server->finish_force_open_sessions(imported_session_map, false);
encode(imported_caps, ack->imported_caps);
mds->send_message_mds(ack, from);
- assert (g_conf->mds_kill_import_at != 8);
+ assert (g_conf()->mds_kill_import_at != 8);
cache->show_subtrees();
}
}
// log finish
- assert(g_conf->mds_kill_import_at != 9);
+ assert(g_conf()->mds_kill_import_at != 9);
if (it->second.state == IMPORT_ACKING) {
for (map<CInode*, map<client_t,Capability::Export> >::iterator p = it->second.peer_exports.begin();
in->auth_unpin(this);
}
-void Migrator::handle_conf_change(const md_config_t *conf,
+void Migrator::handle_conf_change(const md_config_t* conf,
const std::set <std::string> &changed,
const MDSMap &mds_map)
{
// -- cons --
Migrator(MDSRank *m, MDCache *c) : mds(m), cache(c) {
- inject_session_race = g_conf->get_val<bool>("mds_inject_migrator_session_race");
+ inject_session_race = g_conf().get_val<bool>("mds_inject_migrator_session_race");
}
void handle_conf_change(const md_config_t *conf,
});
timer.add_event_after(
- g_conf->mds_purge_queue_busy_flush_period,
+ g_conf()->mds_purge_queue_busy_flush_period,
delayed_flush);
}
}
const uint64_t num = (item.size > 0) ?
Striper::get_num_objects(item.layout, item.size) : 1;
- ops_required = std::min(num, g_conf->filer_max_purge_ops);
+ ops_required = std::min(num, g_conf()->filer_max_purge_ops);
// Account for removing (or zeroing) backtrace
ops_required += 1;
bool PurgeQueue::can_consume()
{
dout(20) << ops_in_flight << "/" << max_purge_ops << " ops, "
- << in_flight.size() << "/" << g_conf->mds_max_purge_files
+ << in_flight.size() << "/" << g_conf()->mds_max_purge_files
<< " files" << dendl;
if (in_flight.size() == 0 && cct->_conf->mds_max_purge_files > 0) {
}
}
-void PurgeQueue::handle_conf_change(const md_config_t *conf,
+void PurgeQueue::handle_conf_change(const md_config_t *mconf,
const std::set <std::string> &changed,
const MDSMap &mds_map)
{
|| changed.count("mds_max_purge_ops_per_pg")) {
update_op_limit(mds_map);
} else if (changed.count("mds_max_purge_files")) {
+ ConfigReader conf{mconf};
Mutex::Locker l(lock);
-
if (in_flight.empty()) {
// We might have gone from zero to a finite limit, so
// might need to kick off consume.
<< file_recover_queue_front_size << " prioritized, "
<< file_recovering.size() << " recovering" << dendl;
- while (file_recovering.size() < g_conf->mds_max_file_recover) {
+ while (file_recovering.size() < g_conf()->mds_max_file_recover) {
if (!file_recover_queue_front.empty()) {
CInode *in = file_recover_queue_front.front();
in->item_recover_queue_front.remove_myself();
"progress and " << stack_size << " in the stack" << dendl;
bool can_continue = true;
elist<CInode*>::iterator i = inode_stack.begin();
- while (g_conf->mds_max_scrub_ops_in_progress > scrubs_in_progress &&
+ while (g_conf()->mds_max_scrub_ops_in_progress > scrubs_in_progress &&
can_continue && !i.end()) {
CInode *curi = *i;
++i; // we have our reference, push iterator forward
<< " scrubbing cdirs" << dendl;
list<CDir*>::iterator i = scrubbing_cdirs.begin();
- while (g_conf->mds_max_scrub_ops_in_progress > scrubs_in_progress) {
+ while (g_conf()->mds_max_scrub_ops_in_progress > scrubs_in_progress) {
// select next CDir
CDir *cur_dir = NULL;
if (i != scrubbing_cdirs.end()) {
<< " (" << path << ")";
} else {
clog->warn() << "Scrub error on inode " << in->ino()
- << " (" << path << ") see " << g_conf->name
+ << " (" << path << ") see " << g_conf()->name
<< " log and `damage ls` output for details";
}
dout(10) << "autoclosing stale session " << session->info.inst << " last "
<< session->last_cap_renew << dendl;
- if (g_conf->mds_session_blacklist_on_timeout) {
+ if (g_conf()->mds_session_blacklist_on_timeout) {
std::stringstream ss;
mds->evict_client(session->get_client().v, false, true,
ss, nullptr);
mds->clog->info() << "denied reconnect attempt (mds is "
<< ceph_mds_state_name(mds->get_state())
<< ") from " << m->get_source_inst()
- << " after " << delay << " (allowed interval " << g_conf->mds_reconnect_timeout << ")";
+ << " after " << delay << " (allowed interval " << g_conf()->mds_reconnect_timeout << ")";
deny = true;
} else if (!session->is_open()) {
dout(1) << " session is closed, ignoring reconnect, sending close" << dendl;
}
utime_t reconnect_end = reconnect_start;
- reconnect_end += g_conf->mds_reconnect_timeout;
+ reconnect_end += g_conf()->mds_reconnect_timeout;
if (ceph_clock_now() >= reconnect_end &&
!client_reconnect_gather.empty()) {
dout(10) << "reconnect timed out" << dendl;
dout(1) << "reconnect gave up on " << session->info.inst << dendl;
mds->clog->warn() << "evicting unresponsive client " << *session
- << ", after waiting " << g_conf->mds_reconnect_timeout
+ << ", after waiting " << g_conf()->mds_reconnect_timeout
<< " seconds during MDS startup";
- if (g_conf->mds_session_blacklist_on_timeout) {
+ if (g_conf()->mds_session_blacklist_on_timeout) {
std::stringstream ss;
mds->evict_client(session->get_client().v, false, true, ss,
gather.new_sub());
void Server::recall_client_state(void)
{
/* try to recall at least 80% of all caps */
- uint64_t max_caps_per_client = Capability::count() * g_conf->get_val<double>("mds_max_ratio_caps_per_client");
- uint64_t min_caps_per_client = g_conf->get_val<uint64_t>("mds_min_caps_per_client");
+ uint64_t max_caps_per_client = Capability::count() * g_conf().get_val<double>("mds_max_ratio_caps_per_client");
+ uint64_t min_caps_per_client = g_conf().get_val<uint64_t>("mds_min_caps_per_client");
if (max_caps_per_client < min_caps_per_client) {
dout(0) << "max_caps_per_client " << max_caps_per_client
<< " < min_caps_per_client " << min_caps_per_client << dendl;
void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn)
{
- if (!g_conf->mds_early_reply)
+ if (!g_conf()->mds_early_reply)
return;
if (mdr->no_early_reply) {
MDRequestRef& mdr)
{
// skip doing this for debugging purposes?
- if (g_conf->mds_inject_traceless_reply_probability &&
+ if (g_conf()->mds_inject_traceless_reply_probability &&
mdr->ls && !mdr->o_trunc &&
- (rand() % 10000 < g_conf->mds_inject_traceless_reply_probability * 10000.0)) {
+ (rand() % 10000 < g_conf()->mds_inject_traceless_reply_probability * 10000.0)) {
dout(5) << "deliberately skipping trace for " << *reply << dendl;
return;
}
mdlog->get_current_segment()->touched_sessions.insert(session->info.inst.name);
if (session->get_num_trim_requests_warnings() > 0 &&
- session->get_num_completed_requests() * 2 < g_conf->mds_max_completed_requests)
+ session->get_num_completed_requests() * 2 < g_conf()->mds_max_completed_requests)
session->reset_num_trim_requests_warnings();
} else {
if (session->get_num_completed_requests() >=
- (g_conf->mds_max_completed_requests << session->get_num_trim_requests_warnings())) {
+ (g_conf()->mds_max_completed_requests << session->get_num_trim_requests_warnings())) {
session->inc_num_trim_requests_warnings();
stringstream ss;
ss << "client." << session->get_client() << " does not advance its oldest_client_tid ("
bool Server::check_fragment_space(MDRequestRef &mdr, CDir *in)
{
const auto size = in->get_frag_size();
- if (size >= g_conf->mds_bal_fragment_size_max) {
- dout(10) << "fragment " << *in << " size exceeds " << g_conf->mds_bal_fragment_size_max << " (ENOSPC)" << dendl;
+ if (size >= g_conf()->mds_bal_fragment_size_max) {
+ dout(10) << "fragment " << *in << " size exceeds " << g_conf()->mds_bal_fragment_size_max << " (ENOSPC)" << dendl;
respond_to_request(mdr, -ENOSPC);
return false;
}
}
if (allow_prealloc_inos &&
- mdr->session->get_num_projected_prealloc_inos() < g_conf->mds_client_prealloc_inos / 2) {
- int need = g_conf->mds_client_prealloc_inos - mdr->session->get_num_projected_prealloc_inos();
+ mdr->session->get_num_projected_prealloc_inos() < g_conf()->mds_client_prealloc_inos / 2) {
+ int need = g_conf()->mds_client_prealloc_inos - mdr->session->get_num_projected_prealloc_inos();
mds->inotable->project_alloc_ids(mdr->prealloc_inos, need);
assert(mdr->prealloc_inos.size()); // or else fix projected increment semantics
mdr->session->pending_prealloc_inos.insert(mdr->prealloc_inos);
memset(&in->inode.dir_layout, 0, sizeof(in->inode.dir_layout));
if (in->inode.is_dir()) {
- in->inode.dir_layout.dl_dir_hash = g_conf->mds_default_dir_hash;
+ in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
} else if (layout) {
in->inode.layout = *layout;
} else {
server->respond_to_request(mdr, 0);
- assert(g_conf->mds_kill_openc_at != 1);
+ assert(g_conf()->mds_kill_openc_at != 1);
}
};
unsigned max_bytes = req->head.args.readdir.max_bytes;
if (!max_bytes)
// make sure at least one item can be encoded
- max_bytes = (512 << 10) + g_conf->mds_max_xattr_pairs_size;
+ max_bytes = (512 << 10) + g_conf()->mds_max_xattr_pairs_size;
// start final blob
bufferlist dirbl;
cur_xattrs_size += p.first.length() + p.second.length();
}
- if (((cur_xattrs_size + inc) > g_conf->mds_max_xattr_pairs_size)) {
+ if (((cur_xattrs_size + inc) > g_conf()->mds_max_xattr_pairs_size)) {
dout(10) << "xattr kv pairs size too big. cur_xattrs_size "
<< cur_xattrs_size << ", inc " << inc << dendl;
respond_to_request(mdr, -ENOSPC);
}
// go!
- assert(g_conf->mds_kill_link_at != 1);
+ assert(g_conf()->mds_kill_link_at != 1);
// local or remote?
if (targeti->is_auth())
}
dout(10) << " targeti auth has prepared nlink++/--" << dendl;
- assert(g_conf->mds_kill_link_at != 2);
+ assert(g_conf()->mds_kill_link_at != 2);
if (auto& desti_srnode = mdr->more()->desti_srnode) {
delete desti_srnode;
<< (inc ? "link ":"unlink ")
<< *dn << " to " << *targeti << dendl;
- assert(g_conf->mds_kill_link_at != 3);
+ assert(g_conf()->mds_kill_link_at != 3);
if (!mdr->more()->witnessed.empty())
mdcache->logged_master_update(mdr->reqid);
<< " on " << mdr->slave_request->get_object_info()
<< dendl;
- assert(g_conf->mds_kill_link_at != 4);
+ assert(g_conf()->mds_kill_link_at != 4);
CInode *targeti = mdcache->get_inode(mdr->slave_request->get_object_info().ino);
assert(targeti);
mdr->auth_pin(targeti);
//ceph_abort(); // test hack: make sure master can handle a slave that fails to prepare...
- assert(g_conf->mds_kill_link_at != 5);
+ assert(g_conf()->mds_kill_link_at != 5);
// journal it
mdr->ls = mdlog->get_current_segment();
dout(10) << "_logged_slave_link " << *mdr
<< " " << *targeti << dendl;
- assert(g_conf->mds_kill_link_at != 6);
+ assert(g_conf()->mds_kill_link_at != 6);
// update the target
targeti->pop_and_dirty_projected_inode(mdr->ls);
<< " r=" << r
<< " " << *targeti << dendl;
- assert(g_conf->mds_kill_link_at != 7);
+ assert(g_conf()->mds_kill_link_at != 7);
if (r == 0) {
// drop our pins, etc.
{
dout(10) << "_committed_slave " << *mdr << dendl;
- assert(g_conf->mds_kill_link_at != 8);
+ assert(g_conf()->mds_kill_link_at != 8);
MMDSSlaveRequest *req = new MMDSSlaveRequest(mdr->reqid, mdr->attempt,
MMDSSlaveRequest::OP_COMMITTED);
<< " ino " << rollback.ino
<< dendl;
- assert(g_conf->mds_kill_link_at != 9);
+ assert(g_conf()->mds_kill_link_at != 9);
mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes
assert(mdr || mds->is_resolve());
{
dout(10) << "_link_rollback_finish" << dendl;
- assert(g_conf->mds_kill_link_at != 10);
+ assert(g_conf()->mds_kill_link_at != 10);
mut->apply();
<< " " << *m << dendl;
mds_rank_t from = mds_rank_t(m->get_source().num());
- assert(g_conf->mds_kill_link_at != 11);
+ assert(g_conf()->mds_kill_link_at != 11);
// note slave
mdr->more()->slaves.insert(from);
}
*/
- assert(g_conf->mds_kill_rename_at != 1);
+ assert(g_conf()->mds_kill_rename_at != 1);
// -- open all srcdn inode frags, if any --
// we need these open so that auth can properly delegate from inode to dirfrags
// test hack: bail after slave does prepare, so we can verify it's _live_ rollback.
if (!mdr->more()->slaves.empty() && !srci->is_dir())
- assert(g_conf->mds_kill_rename_at != 3);
+ assert(g_conf()->mds_kill_rename_at != 3);
if (!mdr->more()->slaves.empty() && srci->is_dir())
- assert(g_conf->mds_kill_rename_at != 4);
+ assert(g_conf()->mds_kill_rename_at != 4);
// -- declare now --
mdr->set_mds_stamp(ceph_clock_now());
// test hack: test slave commit
if (!mdr->more()->slaves.empty() && !in->is_dir())
- assert(g_conf->mds_kill_rename_at != 5);
+ assert(g_conf()->mds_kill_rename_at != 5);
if (!mdr->more()->slaves.empty() && in->is_dir())
- assert(g_conf->mds_kill_rename_at != 6);
+ assert(g_conf()->mds_kill_rename_at != 6);
// bump popularity
mds->balancer->hit_dir(srcdn->get_dir(), META_POP_IWR);
// did we import srci? if so, explicitly ack that import that, before we unlock and reply.
- assert(g_conf->mds_kill_rename_at != 7);
+ assert(g_conf()->mds_kill_rename_at != 7);
// reply
respond_to_request(mdr, 0);
int max_bytes = req->head.args.readdir.max_bytes;
if (!max_bytes)
// make sure at least one item can be encoded
- max_bytes = (512 << 10) + g_conf->mds_max_xattr_pairs_size;
+ max_bytes = (512 << 10) + g_conf()->mds_max_xattr_pairs_size;
__u64 last_snapid = 0;
string offset_str = req->get_path2();
std::string_view snapname = req->get_filepath().last_dentry();
- if (mdr->client_request->get_caller_uid() < g_conf->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf->mds_snap_max_uid) {
+ if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
dout(20) << "mksnap " << snapname << " on " << *diri << " denied to uid " << mdr->client_request->get_caller_uid() << dendl;
respond_to_request(mdr, -EPERM);
return;
std::string_view snapname = req->get_filepath().last_dentry();
- if (mdr->client_request->get_caller_uid() < g_conf->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf->mds_snap_max_uid) {
+ if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid || mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
dout(20) << "rmsnap " << snapname << " on " << *diri << " denied to uid " << mdr->client_request->get_caller_uid() << dendl;
respond_to_request(mdr, -EPERM);
return;
return;
}
- if (mdr->client_request->get_caller_uid() < g_conf->mds_snap_min_uid ||
- mdr->client_request->get_caller_uid() > g_conf->mds_snap_max_uid) {
+ if (mdr->client_request->get_caller_uid() < g_conf()->mds_snap_min_uid ||
+ mdr->client_request->get_caller_uid() > g_conf()->mds_snap_max_uid) {
respond_to_request(mdr, -EPERM);
return;
}
object_locator_t oloc(mds->mdsmap->get_metadata_pool());
C_IO_SM_Load *c = new C_IO_SM_Load(this, false);
ObjectOperation op;
- op.omap_get_vals(last_key, "", g_conf->mds_sessionmap_keys_per_op,
+ op.omap_get_vals(last_key, "", g_conf()->mds_sessionmap_keys_per_op,
&c->session_vals, &c->more_session_vals, &c->values_r);
mds->objecter->read(oid, oloc, op, CEPH_NOSNAP, NULL, 0,
new C_OnFinisher(c, mds->finisher));
ObjectOperation op;
op.omap_get_header(&c->header_bl, &c->header_r);
- op.omap_get_vals("", "", g_conf->mds_sessionmap_keys_per_op,
+ op.omap_get_vals("", "", g_conf()->mds_sessionmap_keys_per_op,
&c->session_vals, &c->more_session_vals, &c->values_r);
mds->objecter->read(oid, oloc, op, CEPH_NOSNAP, NULL, 0, new C_OnFinisher(c, mds->finisher));
if (dirty_sessions.count(s->info.inst.name))
return;
- if (dirty_sessions.size() >= g_conf->mds_sessionmap_keys_per_op) {
+ if (dirty_sessions.size() >= g_conf()->mds_sessionmap_keys_per_op) {
// Pre-empt the usual save() call from journal segment trim, in
// order to avoid building up an oversized OMAP update operation
// from too many sessions modified at once
dout(4) << __func__ << ": writing " << write_sessions.size() << dendl;
// Batch writes into mds_sessionmap_keys_per_op
- const uint32_t kpo = g_conf->mds_sessionmap_keys_per_op;
+ const uint32_t kpo = g_conf()->mds_sessionmap_keys_per_op;
map<string, bufferlist> to_set;
for (uint32_t i = 0; i < write_sessions.size(); ++i) {
const entity_name_t &session_id = write_sessions[i];
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire" << dendl;
- assert(g_conf->mds_kill_journal_expire_at != 1);
+ assert(g_conf()->mds_kill_journal_expire_at != 1);
// commit dirs
for (elist<CDir*>::iterator p = new_dirfrags.begin(); !p.end(); ++p) {
mds->locker->scatter_nudge(&in->nestlock, gather_bld.new_sub());
}
- assert(g_conf->mds_kill_journal_expire_at != 2);
+ assert(g_conf()->mds_kill_journal_expire_at != 2);
// open files and snap inodes
if (!open_files.empty()) {
}
}
- assert(g_conf->mds_kill_journal_expire_at != 3);
+ assert(g_conf()->mds_kill_journal_expire_at != 3);
// backtraces to be stored/updated
for (elist<CInode*>::iterator p = dirty_parent_inodes.begin(); !p.end(); ++p) {
}
}
- assert(g_conf->mds_kill_journal_expire_at != 4);
+ assert(g_conf()->mds_kill_journal_expire_at != 4);
// slave updates
for (elist<MDSlaveUpdate*>::iterator p = slave_updates.begin(member_offset(MDSlaveUpdate,
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire waiting" << dendl;
mds->mdlog->flush();
} else {
- assert(g_conf->mds_kill_journal_expire_at != 5);
+ assert(g_conf()->mds_kill_journal_expire_at != 5);
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire success" << dendl;
}
}
assert(logseg);
- assert(g_conf->mds_kill_journal_replay_at != 1);
+ assert(g_conf()->mds_kill_journal_replay_at != 1);
for (list<std::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
CInode *in = mds->mdcache->get_inode((*p)->inode.ino);
in->state_set(CInode::STATE_AUTH);
else
in->state_clear(CInode::STATE_AUTH);
- assert(g_conf->mds_kill_journal_replay_at != 2);
+ assert(g_conf()->mds_kill_journal_replay_at != 2);
}
// remote dentries
}
}
- assert(g_conf->mds_kill_journal_replay_at != 3);
+ assert(g_conf()->mds_kill_journal_replay_at != 3);
if (renamed_dirino) {
if (renamed_diri) {
} else {
mds->clog->error() << "journal replay sessionmap v " << sessionmapv
<< " -(1|2) > table " << mds->sessionmap.get_version();
- assert(g_conf->mds_wipe_sessions);
+ assert(g_conf()->mds_wipe_sessions);
mds->sessionmap.wipe();
mds->sessionmap.set_version(sessionmapv);
}
// update segment
update_segment(logseg);
- assert(g_conf->mds_kill_journal_replay_at != 4);
+ assert(g_conf()->mds_kill_journal_replay_at != 4);
}
// -----------------------
dout(0) << "journal subtrees: " << subtrees << dendl;
dout(0) << "journal ambig_subtrees: " << ambiguous_subtrees << dendl;
mds->mdcache->show_subtrees();
- assert(!g_conf->mds_debug_subtrees || errors == 0);
+ assert(!g_conf()->mds_debug_subtrees || errors == 0);
}
return;
}
}
metablob.replay(mds, _segment);
- if (in && g_conf->mds_debug_frag)
+ if (in && g_conf()->mds_debug_frag)
in->verify_dirfrags();
}
} else if (what.substr(0, 6) == "config") {
PyFormatter f;
if (what == "config_options") {
- g_conf->config_options(&f);
+ g_conf().config_options(&f);
} else if (what == "config") {
- g_conf->show_config(&f);
+ g_conf().show_config(&f);
}
return f.get();
} else if (what == "mon_map") {
static PyObject*
ceph_get_mgr_id(BaseMgrModule *self, PyObject *args)
{
- return PyString_FromString(g_conf->name.get_id().c_str());
+ return PyString_FromString(g_conf()->name.get_id().c_str());
}
static PyObject*
static PyObject*
ceph_get_mgr_id(BaseMgrStandbyModule *self, PyObject *args)
{
- return PyString_FromString(g_conf->name.get_id().c_str());
+ return PyString_FromString(g_conf()->name.get_id().c_str());
}
static PyObject*
LogChannelRef audit_clog_)
: Dispatcher(g_ceph_context),
client_byte_throttler(new Throttle(g_ceph_context, "mgr_client_bytes",
- g_conf->get_val<Option::size_t>("mgr_client_bytes"))),
+ g_conf().get_val<Option::size_t>("mgr_client_bytes"))),
client_msg_throttler(new Throttle(g_ceph_context, "mgr_client_messages",
- g_conf->get_val<uint64_t>("mgr_client_messages"))),
+ g_conf().get_val<uint64_t>("mgr_client_messages"))),
osd_byte_throttler(new Throttle(g_ceph_context, "mgr_osd_bytes",
- g_conf->get_val<Option::size_t>("mgr_osd_bytes"))),
+ g_conf().get_val<Option::size_t>("mgr_osd_bytes"))),
osd_msg_throttler(new Throttle(g_ceph_context, "mgr_osd_messsages",
- g_conf->get_val<uint64_t>("mgr_osd_messages"))),
+ g_conf().get_val<uint64_t>("mgr_osd_messages"))),
mds_byte_throttler(new Throttle(g_ceph_context, "mgr_mds_bytes",
- g_conf->get_val<Option::size_t>("mgr_mds_bytes"))),
+ g_conf().get_val<Option::size_t>("mgr_mds_bytes"))),
mds_msg_throttler(new Throttle(g_ceph_context, "mgr_mds_messsages",
- g_conf->get_val<uint64_t>("mgr_mds_messages"))),
+ g_conf().get_val<uint64_t>("mgr_mds_messages"))),
mon_byte_throttler(new Throttle(g_ceph_context, "mgr_mon_bytes",
- g_conf->get_val<Option::size_t>("mgr_mon_bytes"))),
+ g_conf().get_val<Option::size_t>("mgr_mon_bytes"))),
mon_msg_throttler(new Throttle(g_ceph_context, "mgr_mon_messsages",
- g_conf->get_val<uint64_t>("mgr_mon_messages"))),
+ g_conf().get_val<uint64_t>("mgr_mon_messages"))),
msgr(nullptr),
monc(monc_),
finisher(finisher_),
clog(clog_),
audit_clog(audit_clog_),
auth_cluster_registry(g_ceph_context,
- g_conf->auth_supported.empty() ?
- g_conf->auth_cluster_required :
- g_conf->auth_supported),
+ g_conf()->auth_supported.empty() ?
+ g_conf()->auth_cluster_required :
+ g_conf()->auth_supported),
auth_service_registry(g_ceph_context,
- g_conf->auth_supported.empty() ?
- g_conf->auth_service_required :
- g_conf->auth_supported),
+ g_conf()->auth_supported.empty() ?
+ g_conf()->auth_service_required :
+ g_conf()->auth_supported),
lock("DaemonServer"),
pgmap_ready(false)
{
- g_conf->add_observer(this);
+ g_conf().add_observer(this);
}
DaemonServer::~DaemonServer() {
delete msgr;
- g_conf->remove_observer(this);
+ g_conf().remove_observer(this);
}
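The constructor/destructor pair above shows the observer lifecycle this patch preserves while changing its spelling: register with `g_conf().add_observer(this)`, receive the changed-key set in `handle_conf_change()` after an `apply_changes()`, and deregister with `g_conf().remove_observer(this)`. A self-contained sketch of that register/notify/unregister shape follows, built from invented toy types rather than the real observer interface.

#include <algorithm>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Toy observer interface, loosely modeled on the pattern in this patch;
// none of these types are the real Ceph classes.
struct ToyObserver {
  virtual ~ToyObserver() = default;
  virtual void handle_conf_change(const std::set<std::string>& changed) = 0;
};

struct ToyConf {
  std::map<std::string, std::string> values;
  std::vector<ToyObserver*> observers;
  std::set<std::string> dirty;

  void add_observer(ToyObserver* o) { observers.push_back(o); }
  void remove_observer(ToyObserver* o) {
    observers.erase(std::remove(observers.begin(), observers.end(), o),
                    observers.end());
  }
  void set_val(const std::string& key, const std::string& val) {
    values[key] = val;
    dirty.insert(key);
  }
  void apply_changes() {
    for (auto* o : observers)
      o->handle_conf_change(dirty);
    dirty.clear();
  }
};

// Registers in the constructor, deregisters in the destructor, reacts to the
// set of changed keys -- the same shape as DaemonServer/MDSDaemon above.
struct ToyDaemon : ToyObserver {
  explicit ToyDaemon(ToyConf& c) : conf(c) { conf.add_observer(this); }
  ~ToyDaemon() override { conf.remove_observer(this); }
  void handle_conf_change(const std::set<std::string>& changed) override {
    if (changed.count("mds_log_pause"))
      std::cout << "kicking log submitter\n";
  }
  ToyConf& conf;
};

int main() {
  ToyConf conf;
  {
    ToyDaemon daemon(conf);
    conf.set_val("mds_log_pause", "false");
    conf.apply_changes();   // daemon.handle_conf_change sees {"mds_log_pause"}
  }                         // daemon removed itself as an observer here
  return 0;
}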
int DaemonServer::init(uint64_t gid, entity_addrvec_t client_addrs)
{
// Initialize Messenger
- std::string public_msgr_type = g_conf->ms_public_type.empty() ?
- g_conf->get_val<std::string>("ms_type") : g_conf->ms_public_type;
+ std::string public_msgr_type = g_conf()->ms_public_type.empty() ?
+ g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MGR(gid),
"mgr",
std::string val;
cmd_getval(cct, cmdctx->cmdmap, "key", key);
cmd_getval(cct, cmdctx->cmdmap, "value", val);
- r = cct->_conf->set_val(key, val, &ss);
+ r = cct->_conf.set_val(key, val, &ss);
if (r == 0) {
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
}
cmdctx->reply(0, ss);
return true;
return true;
}
- double max_change = g_conf->get_val<double>("mon_reweight_max_change");
+ double max_change = g_conf().get_val<double>("mon_reweight_max_change");
cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_change", max_change);
if (max_change <= 0.0) {
ss << "max_change " << max_change << " must be positive";
cmdctx->reply(-EINVAL, ss);
return true;
}
- int64_t max_osds = g_conf->get_val<int64_t>("mon_reweight_max_osds");
+ int64_t max_osds = g_conf().get_val<int64_t>("mon_reweight_max_osds");
cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_osds", max_osds);
if (max_osds <= 0) {
ss << "max_osds " << max_osds << " must be positive";
void DaemonServer::_prune_pending_service_map()
{
utime_t cutoff = ceph_clock_now();
- cutoff -= g_conf->get_val<double>("mgr_service_beacon_grace");
+ cutoff -= g_conf().get_val<double>("mgr_service_beacon_grace");
auto p = pending_service_map.services.begin();
while (p != pending_service_map.services.end()) {
auto q = p->second.daemons.begin();
void DaemonServer::send_report()
{
if (!pgmap_ready) {
- if (ceph_clock_now() - started_at > g_conf->get_val<int64_t>("mgr_stats_period") * 4.0) {
+ if (ceph_clock_now() - started_at > g_conf().get_val<int64_t>("mgr_stats_period") * 4.0) {
pgmap_ready = true;
reported_osds.clear();
dout(1) << "Giving up on OSDs that haven't reported yet, sending "
assert(lock.is_locked_by_me());
auto configure = new MMgrConfigure();
- configure->stats_period = g_conf->get_val<int64_t>("mgr_stats_period");
- configure->stats_threshold = g_conf->get_val<int64_t>("mgr_stats_threshold");
+ configure->stats_period = g_conf().get_val<int64_t>("mgr_stats_period");
+ configure->stats_threshold = g_conf().get_val<int64_t>("mgr_stats_threshold");
c->send_message(configure);
}
m->service_name = service_name;
session->con->send_message(m);
utime_t timeout;
- timeout.set_from_double(cct->_conf->get_val<double>(
+ timeout.set_from_double(cct->_conf.get_val<double>(
"mgr_client_service_daemon_unregister_timeout"));
shutdown_cond.WaitInterval(lock, timeout);
}
if (last_connect_attempt != utime_t()) {
utime_t now = ceph_clock_now();
utime_t when = last_connect_attempt;
- when += cct->_conf->get_val<double>("mgr_connect_retry_interval");
+ when += cct->_conf.get_val<double>("mgr_connect_retry_interval");
if (now < when) {
if (!connect_retry_callback) {
connect_retry_callback = timer.add_event_at(
open->service_daemon = service_daemon;
open->daemon_metadata = daemon_metadata;
}
- cct->_conf->get_config_bl(0, &open->config_bl, &last_config_bl_version);
- cct->_conf->get_defaults_bl(&open->config_defaults_bl);
+ cct->_conf.get_config_bl(0, &open->config_bl, &last_config_bl_version);
+ cct->_conf.get_defaults_bl(&open->config_defaults_bl);
session->con->send_message(open);
}
}
report->daemon_health_metrics = std::move(daemon_health_metrics);
- cct->_conf->get_config_bl(last_config_bl_version, &report->config_bl,
+ cct->_conf.get_config_bl(last_config_bl_version, &report->config_bl,
&last_config_bl_version);
session->con->send_message(report);
monc{g_ceph_context},
client_messenger(Messenger::create(
g_ceph_context,
- cct->_conf->get_val<std::string>("ms_type"),
+ cct->_conf.get_val<std::string>("ms_type"),
entity_name_t::MGR(),
"mgr",
getpid(),
MMgrBeacon *m = new MMgrBeacon(monc.get_fsid(),
monc.get_global_id(),
- g_conf->name.get_id(),
+ g_conf()->name.get_id(),
addrs,
available,
std::move(module_info),
}
timer.add_event_after(
- g_conf->get_val<std::chrono::seconds>("mgr_tick_period").count(),
+ g_conf().get_val<std::chrono::seconds>("mgr_tick_period").count(),
new FunctionContext([this](int r){
tick();
}
derr << "I was active but no longer am" << dendl;
respawn();
} else {
- if (map.active_gid != 0 && map.active_name != g_conf->name.get_id()) {
+ if (map.active_gid != 0 && map.active_name != g_conf()->name.get_id()) {
// I am the standby and someone else is active, start modules
// in standby mode to do redirects if needed
if (!py_module_registry.is_standby_running()) {
#endif
// Configure sys.path to include mgr_module_path
string paths = (":" + get_site_packages() +
- ":" + g_conf->get_val<std::string>("mgr_module_path"));
+ ":" + g_conf().get_val<std::string>("mgr_module_path"));
#if PY_MAJOR_VERSION >= 3
wstring sys_path(Py_GetPath() + wstring(begin(paths), end(paths)));
PySys_SetPath(const_cast<wchar_t*>(sys_path.c_str()));
Py_SetProgramName(const_cast<char*>(PYTHON_EXECUTABLE));
#endif
// Add more modules
- if (g_conf->get_val<bool>("daemonize")) {
+ if (g_conf().get_val<bool>("daemonize")) {
PyImport_AppendInittab("ceph_logger", PyModule::init_ceph_logger);
}
PyImport_AppendInittab("ceph_module", PyModule::init_ceph_module);
std::set<std::string> PyModuleRegistry::probe_modules() const
{
- std::string path = g_conf->get_val<std::string>("mgr_module_path");
+ std::string path = g_conf().get_val<std::string>("mgr_module_path");
DIR *dir = opendir(path.c_str());
if (!dir) {
std::string exc_msg = peek_pyerror();
clog->error() << "Unhandled exception from module '" << get_name()
- << "' while running on mgr." << g_conf->name.get_id()
+ << "' while running on mgr." << g_conf()->name.get_id()
<< ": " << exc_msg;
derr << get_name() << ".serve:" << dendl;
derr << handle_pyerror() << dendl;
{
assert(mon->is_leader());
- max_global_id += g_conf->mon_globalid_prealloc;
+ max_global_id += g_conf()->mon_globalid_prealloc;
dout(10) << "increasing max_global_id to " << max_global_id << dendl;
Incremental inc;
inc.inc_type = GLOBAL_ID;
version_t AuthMonitor::get_trim_to() const
{
- unsigned max = g_conf->paxos_max_join_drift * 2;
+ unsigned max = g_conf()->paxos_max_join_drift * 2;
version_t version = get_last_committed();
if (mon->is_leader() && (version > max))
return version - max;
// bump the max?
while (mon->is_leader() &&
- (max_global_id < g_conf->mon_globalid_prealloc ||
- next_global_id >= max_global_id - g_conf->mon_globalid_prealloc / 2)) {
+ (max_global_id < g_conf()->mon_globalid_prealloc ||
+ next_global_id >= max_global_id - g_conf()->mon_globalid_prealloc / 2)) {
increase_max_global_id();
}
entity_name.get_type() == CEPH_ENTITY_TYPE_OSD ||
entity_name.get_type() == CEPH_ENTITY_TYPE_MDS ||
entity_name.get_type() == CEPH_ENTITY_TYPE_MGR) {
- if (g_conf->cephx_cluster_require_signatures ||
- g_conf->cephx_require_signatures) {
+ if (g_conf()->cephx_cluster_require_signatures ||
+ g_conf()->cephx_require_signatures) {
dout(1) << m->get_source_inst()
<< " supports cephx but not signatures and"
<< " 'cephx [cluster] require signatures = true';"
supported.erase(CEPH_AUTH_CEPHX);
}
} else {
- if (g_conf->cephx_service_require_signatures ||
- g_conf->cephx_require_signatures) {
+ if (g_conf()->cephx_service_require_signatures ||
+ g_conf()->cephx_require_signatures) {
dout(1) << m->get_source_inst()
<< " supports cephx but not signatures and"
<< " 'cephx [service] require signatures = true';"
entity_name.get_type() == CEPH_ENTITY_TYPE_OSD ||
entity_name.get_type() == CEPH_ENTITY_TYPE_MDS ||
entity_name.get_type() == CEPH_ENTITY_TYPE_MGR) {
- if (g_conf->cephx_cluster_require_version >= 2 ||
- g_conf->cephx_require_version >= 2) {
+ if (g_conf()->cephx_cluster_require_version >= 2 ||
+ g_conf()->cephx_require_version >= 2) {
dout(1) << m->get_source_inst()
<< " supports cephx but not v2 and"
<< " 'cephx [cluster] require version >= 2';"
supported.erase(CEPH_AUTH_CEPHX);
}
} else {
- if (g_conf->cephx_service_require_version >= 2 ||
- g_conf->cephx_require_version >= 2) {
+ if (g_conf()->cephx_service_require_version >= 2 ||
+ g_conf()->cephx_require_version >= 2) {
dout(1) << m->get_source_inst()
<< " supports cephx but not v2 and"
<< " 'cephx [service] require version >= 2';"
// they specified '-i <file>'
data = cmd->get_data();
}
- if (data.length() > (size_t) g_conf->mon_config_key_max_entry_size) {
+ if (data.length() > (size_t) g_conf()->mon_config_key_max_entry_size) {
ret = -EFBIG; // File too large
ss << "error: entry size limited to "
- << g_conf->mon_config_key_max_entry_size << " bytes. "
+ << g_conf()->mon_config_key_max_entry_size << " bytes. "
<< "Use 'mon config key max entry size' to manually adjust";
goto out;
}
if (prefix == "config help") {
string name;
cmd_getval(g_ceph_context, cmdmap, "key", name);
- const Option *opt = g_conf->find_option(name);
+ const Option *opt = g_conf().find_option(name);
if (!opt) {
ss << "configuration option '" << name << "' not recognized";
err = -ENOENT;
odata.append("\n");
goto reply;
}
- const Option *opt = g_conf->find_option(name);
+ const Option *opt = g_conf().find_option(name);
if (!opt) {
err = -ENOENT;
goto reply;
if (prefix == "config set") {
if (name.substr(0, 4) != "mgr/") {
- const Option *opt = g_conf->find_option(name);
+ const Option *opt = g_conf().find_option(name);
if (!opt) {
ss << "unrecognized config option '" << name << "'";
err = -EINVAL;
continue;
}
// a known and worthy option?
- const Option *o = g_conf->find_option(j.key);
+ const Option *o = g_conf().find_option(j.key);
if (!o ||
o->flags & Option::FLAG_NO_MON_UPDATE) {
goto skip;
section->options.insert(make_pair(name, std::move(mopt)));
++num;
} else {
- const Option *opt = g_conf->find_option(name);
+ const Option *opt = g_conf().find_option(name);
if (!opt) {
dout(10) << __func__ << " unrecognized option '" << name << "'" << dendl;
opt = new Option(name, Option::TYPE_STR, Option::LEVEL_UNKNOWN);
{
const OSDMap& osdmap = mon->osdmon()->osdmap;
map<string,string> crush_location;
- osdmap.crush->get_full_location(g_conf->host, &crush_location);
+ osdmap.crush->get_full_location(g_conf()->host, &crush_location);
map<string,string> out;
config_map.generate_entity_map(
- g_conf->name,
+ g_conf()->name,
crush_location,
osdmap.crush.get(),
string(), // no device class
&out);
- g_conf->set_mon_vals(g_ceph_context, out, nullptr);
+ g_conf().set_mon_vals(g_ceph_context, out, nullptr);
}
}
* Leader.
*/
expire_event = mon->timer.add_event_after(
- g_conf->mon_election_timeout + plus,
+ g_conf()->mon_election_timeout + plus,
new C_MonContext(mon, [this](int) {
expire();
}));
// assign a standby to rank 0 to avoid health warnings
std::string _name;
mds_gid_t gid = fsmap.find_replacement_for({fs->fscid, 0}, _name,
- g_conf->mon_force_standby_active);
+ g_conf()->mon_force_standby_active);
if (gid != MDS_GID_NONE) {
const auto &info = fsmap.get_info_gid(gid);
// snapshot of usage
DataStats stats;
- get_fs_stats(stats.fs_stats, g_conf->mon_data.c_str());
+ get_fs_stats(stats.fs_stats, g_conf()->mon_data.c_str());
map<string,uint64_t> extra;
uint64_t store_size = mon->store->get_estimated_size(extra);
assert(store_size > 0);
// MON_DISK_{LOW,CRIT,BIG}
health_check_map_t next;
- if (stats.fs_stats.avail_percent <= g_conf->mon_data_avail_crit) {
+ if (stats.fs_stats.avail_percent <= g_conf()->mon_data_avail_crit) {
stringstream ss, ss2;
ss << "mon%plurals% %names% %isorare% very low on available space";
auto& d = next.add("MON_DISK_CRIT", HEALTH_ERR, ss.str());
ss2 << "mon." << mon->name << " has " << stats.fs_stats.avail_percent
<< "% avail";
d.detail.push_back(ss2.str());
- } else if (stats.fs_stats.avail_percent <= g_conf->mon_data_avail_warn) {
+ } else if (stats.fs_stats.avail_percent <= g_conf()->mon_data_avail_warn) {
stringstream ss, ss2;
ss << "mon%plurals% %names% %isorare% low on available space";
auto& d = next.add("MON_DISK_LOW", HEALTH_WARN, ss.str());
<< "% avail";
d.detail.push_back(ss2.str());
}
- if (stats.store_stats.bytes_total >= g_conf->mon_data_size_warn) {
+ if (stats.store_stats.bytes_total >= g_conf()->mon_data_size_warn) {
stringstream ss, ss2;
ss << "mon%plurals% %names% %isorare% using a lot of disk space";
auto& d = next.add("MON_DISK_BIG", HEALTH_WARN, ss.str());
ss2 << "mon." << mon->name << " is "
<< byte_u_t(stats.store_stats.bytes_total)
<< " >= mon_data_size_warn ("
- << byte_u_t(g_conf->mon_data_size_warn) << ")";
+ << byte_u_t(g_conf()->mon_data_size_warn) << ")";
d.detail.push_back(ss2.str());
}
// There's also the obvious drawback that if this is set on a single
// monitor on a 3-monitor cluster, this warning will only be shown every
// third monitor connection.
- if (g_conf->mon_warn_on_osd_down_out_interval_zero &&
- g_conf->mon_osd_down_out_interval == 0) {
+ if (g_conf()->mon_warn_on_osd_down_out_interval_zero &&
+ g_conf()->mon_osd_down_out_interval == 0) {
ostringstream ss, ds;
ss << "mon%plurals% %names% %hasorhave% mon_osd_down_out_interval set to 0";
auto& d = next.add("OSD_NO_DOWN_OUT_INTERVAL", HEALTH_WARN, ss.str());
{
dout(10) << "create_initial -- creating initial map" << dendl;
LogEntry e;
- e.name = g_conf->name;
+ e.name = g_conf()->name;
e.rank = entity_name_t::MON(mon->rank);
e.addrs = mon->messenger->get_myaddrs();
e.stamp = ceph_clock_now();
if (channel.empty()) // keep retrocompatibility
channel = CLOG_CHANNEL_CLUSTER;
- if (g_conf->get_val<bool>("mon_cluster_log_to_stderr")) {
+ if (g_conf().get_val<bool>("mon_cluster_log_to_stderr")) {
cerr << channel << " " << le << std::endl;
}
}
summary.version++;
- summary.prune(g_conf->mon_log_max_summary);
+ summary.prune(g_conf()->mon_log_max_summary);
}
dout(15) << __func__ << " logging for "
if (!mon->is_leader())
return 0;
- unsigned max = g_conf->mon_max_log_epochs;
+ unsigned max = g_conf()->mon_max_log_epochs;
version_t version = get_last_committed();
if (version > max)
return version - max;
pending_log.insert(pair<utime_t,LogEntry>(p->stamp, *p));
}
}
- pending_summary.prune(g_conf->mon_log_max_summary);
+ pending_summary.prune(g_conf()->mon_log_max_summary);
wait_for_finished_proposal(op, new C_Log(this, op));
return true;
}
bool LogMonitor::should_propose(double& delay)
{
// commit now if we have a lot of pending events
- if (g_conf->mon_max_log_entries_per_event > 0 &&
- pending_log.size() >= (unsigned)g_conf->mon_max_log_entries_per_event)
+ if (g_conf()->mon_max_log_entries_per_event > 0 &&
+ pending_log.size() >= (unsigned)g_conf()->mon_max_log_entries_per_event)
return true;
// otherwise fall back to generic policy
le.channel = CLOG_CHANNEL_DEFAULT;
le.msg = str_join(logtext, " ");
pending_summary.add(le);
- pending_summary.prune(g_conf->mon_log_max_summary);
+ pending_summary.prune(g_conf()->mon_log_max_summary);
pending_log.insert(pair<utime_t,LogEntry>(le.stamp, le));
wait_for_finished_proposal(op, new Monitor::C_Command(
mon, op, 0, string(), get_last_committed() + 1));
channels.clear();
- int r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog,
+ int r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_syslog,
oss, &channels.log_to_syslog,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog_level,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_syslog_level,
oss, &channels.syslog_level,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog_facility,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_syslog_facility,
oss, &channels.syslog_facility,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_file, oss,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_file, oss,
&channels.log_file,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_file_level, oss,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_file_level, oss,
&channels.log_file_level,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_graylog, oss,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_graylog, oss,
&channels.log_to_graylog,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_graylog_host, oss,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_graylog_host, oss,
&channels.log_to_graylog_host,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
return;
}
- r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_graylog_port, oss,
+ r = get_conf_str_map_helper(g_conf()->mon_cluster_log_to_graylog_port, oss,
&channels.log_to_graylog_port,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
if (graylogs.count(channel) == 0) {
auto graylog(std::make_shared<ceph::logging::Graylog>("mon"));
- graylog->set_fsid(g_conf->get_val<uuid_d>("fsid"));
- graylog->set_hostname(g_conf->host);
+ graylog->set_fsid(g_conf().get_val<uuid_d>("fsid"));
+ graylog->set_hostname(g_conf()->host);
graylog->set_destination(get_str_map_key(log_to_graylog_host, channel,
&CLOG_CONFIG_DEFAULT_KEY),
atoi(get_str_map_key(log_to_graylog_port, channel,
void init() override {
generic_dout(10) << "LogMonitor::init" << dendl;
- g_conf->add_observer(this);
+ g_conf().add_observer(this);
update_log_channels();
}
int sub_name_to_id(const string& n);
void on_shutdown() override {
- g_conf->remove_observer(this);
+ g_conf().remove_observer(this);
}
const char **get_tracked_conf_keys() const override {
// new map
dout(4) << "new map" << dendl;
print_map<0>(get_fsmap());
- if (!g_conf->mon_mds_skip_sanity) {
+ if (!g_conf()->mon_mds_skip_sanity) {
get_fsmap().sanity();
}
// print map iff 'debug mon = 30' or higher
print_map<30>(pending);
- if (!g_conf->mon_mds_skip_sanity) {
+ if (!g_conf()->mon_mds_skip_sanity) {
pending.sanity();
}
version_t MDSMonitor::get_trim_to() const
{
version_t floor = 0;
- if (g_conf->mon_mds_force_trim_to > 0 &&
- g_conf->mon_mds_force_trim_to < (int)get_last_committed()) {
- floor = g_conf->mon_mds_force_trim_to;
+ if (g_conf()->mon_mds_force_trim_to > 0 &&
+ g_conf()->mon_mds_force_trim_to < (int)get_last_committed()) {
+ floor = g_conf()->mon_mds_force_trim_to;
dout(10) << __func__ << " explicit mon_mds_force_trim_to = "
<< floor << dendl;
}
- unsigned max = g_conf->mon_max_mdsmap_epochs;
+ unsigned max = g_conf()->mon_max_mdsmap_epochs;
version_t last = get_last_committed();
if (last - get_first_committed() > max && floor < last - max)
// boot?
if (state == MDSMap::STATE_BOOT) {
// zap previous instance of this name?
- if (g_conf->mds_enforce_unique_name) {
+ if (g_conf()->mds_enforce_unique_name) {
bool failed_mds = false;
while (mds_gid_t existing = pending.find_mds_gid_by_name(m->get_name())) {
if (!mon->osdmon()->is_writeable()) {
<< info.rank << " damaged" << dendl;
utime_t until = ceph_clock_now();
- until += g_conf->get_val<double>("mon_mds_blacklist_interval");
+ until += g_conf().get_val<double>("mon_mds_blacklist_interval");
const auto blacklist_epoch = mon->osdmon()->blacklist(info.addrs, until);
request_proposal(mon->osdmon());
pending.damaged(gid, blacklist_epoch);
epoch_t blacklist_epoch = 0;
if (info.rank >= 0 && info.state != MDSMap::STATE_STANDBY_REPLAY) {
utime_t until = ceph_clock_now();
- until += g_conf->get_val<double>("mon_mds_blacklist_interval");
+ until += g_conf().get_val<double>("mon_mds_blacklist_interval");
blacklist_epoch = mon->osdmon()->blacklist(info.addrs, until);
}
mds++;
}
mds_gid_t newgid = fsmap.find_replacement_for({fscid, mds},
- name, g_conf->mon_force_standby_active);
+ name, g_conf()->mon_force_standby_active);
if (newgid == MDS_GID_NONE) {
return false;
}
mono_time now = mono_clock::now();
chrono::duration<double> since = now-latest_beacon;
const bool may_replace = since.count() <
- std::max(g_conf->mds_beacon_interval, g_conf->mds_beacon_grace * 0.5);
+ std::max(g_conf()->mds_beacon_interval, g_conf()->mds_beacon_grace * 0.5);
// are we in?
// and is there a non-laggy standby that can take over for us?
may_replace &&
!fsmap.get_filesystem(fscid)->mds_map.test_flag(CEPH_MDSMAP_NOT_JOINABLE) &&
(sgid = fsmap.find_replacement_for({fscid, info.rank}, info.name,
- g_conf->mon_force_standby_active)) != MDS_GID_NONE)
+ g_conf()->mon_force_standby_active)) != MDS_GID_NONE)
{
MDSMap::mds_info_t si = fsmap.get_info_gid(sgid);
while (p != failed.end()) {
mds_rank_t f = *p++;
mds_gid_t sgid = fsmap.find_replacement_for({fs->fscid, f}, {},
- g_conf->mon_force_standby_active);
+ g_conf()->mon_force_standby_active);
if (sgid) {
const MDSMap::mds_info_t si = fsmap.get_info_gid(sgid);
dout(0) << " taking over failed mds." << f << " with " << sgid
chrono::duration<double> since_last = now-last_tick;
if (since_last.count() >
- (g_conf->mds_beacon_grace - g_conf->mds_beacon_interval)) {
+ (g_conf()->mds_beacon_grace - g_conf()->mds_beacon_interval)) {
// This case handles either local slowness (calls being delayed
// for whatever reason) or cluster election slowness (a long gap
// between calls while an election happened)
}
- if (since_last.count() >= g_conf->mds_beacon_grace) {
+ if (since_last.count() >= g_conf()->mds_beacon_grace) {
auto &info = pending.get_info_gid(gid);
dout(1) << "no beacon from mds." << info.rank << "." << info.inc
<< " (gid: " << gid << " addr: " << info.addrs
version_t MgrMonitor::get_trim_to() const
{
- int64_t max = g_conf->get_val<int64_t>("mon_max_mgrmap_epochs");
+ int64_t max = g_conf().get_val<int64_t>("mon_max_mgrmap_epochs");
if (map.epoch > max) {
return map.epoch - max;
}
void MgrMonitor::create_initial()
{
// Take a local copy of initial_modules for tokenizer to iterate over.
- auto initial_modules = g_conf->get_val<std::string>("mgr_initial_modules");
+ auto initial_modules = g_conf().get_val<std::string>("mgr_initial_modules");
boost::tokenizer<> tok(initial_modules);
for (auto& m : tok) {
pending_map.modules.insert(m);
// no OSDs are ever created.
if (ever_had_active_mgr ||
(mon->osdmon()->osdmap.get_num_osds() > 0 &&
- now > mon->monmap->created + g_conf->get_val<int64_t>("mon_mgr_mkfs_grace"))) {
+ now > mon->monmap->created + g_conf().get_val<int64_t>("mon_mgr_mkfs_grace"))) {
health_status_t level = HEALTH_WARN;
if (first_seen_inactive != utime_t() &&
- now - first_seen_inactive > g_conf->get_val<int64_t>("mon_mgr_inactive_grace")) {
+ now - first_seen_inactive > g_conf().get_val<int64_t>("mon_mgr_inactive_grace")) {
level = HEALTH_ERR;
}
return level;
timer:
digest_event = mon->timer.add_event_after(
- g_conf->get_val<int64_t>("mon_mgr_digest_period"),
+ g_conf().get_val<int64_t>("mon_mgr_digest_period"),
new C_MonContext(mon, [this](int) {
send_digests();
}));
const auto now = ceph::coarse_mono_clock::now();
const auto mgr_beacon_grace =
- g_conf->get_val<std::chrono::seconds>("mon_mgr_beacon_grace");
+ g_conf().get_val<std::chrono::seconds>("mon_mgr_beacon_grace");
// Note that this is the mgr daemon's tick period, not ours (the
// beacon is sent with this period).
const auto mgr_tick_period =
- g_conf->get_val<std::chrono::seconds>("mgr_tick_period");
+ g_conf().get_val<std::chrono::seconds>("mgr_tick_period");
if (last_tick != ceph::coarse_mono_clock::time_point::min()
&& (now - last_tick > (mgr_beacon_grace - mgr_tick_period))) {
!ever_had_active_mgr &&
should_warn_about_mgr_down() != HEALTH_OK) {
dout(10) << " exceeded mon_mgr_mkfs_grace "
- << g_conf->get_val<int64_t>("mon_mgr_mkfs_grace")
+ << g_conf().get_val<int64_t>("mon_mgr_mkfs_grace")
<< " seconds" << dendl;
propose = true;
}
want_monmap(true),
had_a_connection(false),
reopen_interval_multiplier(
- cct_->_conf->get_val<double>("mon_client_hunt_interval_min_multiple")),
+ cct_->_conf.get_val<double>("mon_client_hunt_interval_min_multiple")),
last_mon_command_tid(0),
version_req_id(0)
{}
messenger = nullptr;
if (!monmap.fsid.is_zero()) {
- cct->_conf->set_val("fsid", stringify(monmap.fsid));
+ cct->_conf.set_val("fsid", stringify(monmap.fsid));
}
out:
{
ldout(cct,10) << __func__ << " " << *m << dendl;
finisher.queue(new FunctionContext([this, m](int r) {
- cct->_conf->set_mon_vals(cct, m->config, config_cb);
+ cct->_conf.set_mon_vals(cct, m->config, config_cb);
m->put();
}));
got_config = true;
{
// un-backoff our reconnect interval
reopen_interval_multiplier = std::max(
- cct->_conf->get_val<double>("mon_client_hunt_interval_min_multiple"),
+ cct->_conf.get_val<double>("mon_client_hunt_interval_min_multiple"),
reopen_interval_multiplier /
- cct->_conf->get_val<double>("mon_client_hunt_interval_backoff"));
+ cct->_conf.get_val<double>("mon_client_hunt_interval_backoff"));
ldout(cct, 20) << __func__ << " reopen_interval_multiplier now "
<< reopen_interval_multiplier << dendl;
}
int MonMap::build_initial(CephContext *cct, ostream& errout)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
// file?
- const auto monmap = conf->get_val<std::string>("monmap");
+ const auto monmap = conf.get_val<std::string>("monmap");
if (!monmap.empty()) {
int r;
try {
}
// fsid from conf?
- const auto new_fsid = conf->get_val<uuid_d>("fsid");
+ const auto new_fsid = conf.get_val<uuid_d>("fsid");
if (!new_fsid.is_zero()) {
fsid = new_fsid;
}
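// Illustrative sketch (not part of the change itself): where a function used
// to cache the raw pointer, the converted code keeps a reference to the proxy,
// so the remaining reads switch from conf->get_val(...) to conf.get_val(...).
// Roughly:
//
//   const auto& conf = cct->_conf;   // proxy reference, not md_config_t*
//   const auto mon_host = conf.get_val<std::string>("mon_host");
//   const auto fsid = conf.get_val<uuid_d>("fsid");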
// -m foo?
- const auto mon_host = conf->get_val<std::string>("mon_host");
+ const auto mon_host = conf.get_val<std::string>("mon_host");
if (!mon_host.empty()) {
int r = build_from_host_list(mon_host, "noname-");
if (r < 0) {
// What monitors are in the config file?
std::vector <std::string> sections;
- int ret = conf->get_all_sections(sections);
+ int ret = conf.get_all_sections(sections);
if (ret) {
errout << "Unable to find any monitors in the configuration "
<< "file, because there was an error listing the sections. error "
sections.push_back("mon");
sections.push_back("global");
std::string val;
- int res = conf->get_val_from_conf_file(sections, "mon addr", val, true);
+ int res = conf.get_val_from_conf_file(sections, "mon addr", val, true);
if (res) {
errout << "failed to get an address for mon." << *m << ": error "
<< res << std::endl;
addr.set_port(CEPH_MON_PORT_LEGACY);
uint16_t priority = 0;
- if (!conf->get_val_from_conf_file(sections, "mon priority", val, false)) {
+ if (!conf.get_val_from_conf_file(sections, "mon priority", val, false)) {
try {
priority = std::stoul(val);
} catch (std::logic_error&) {
if (size() == 0) {
// no info found from conf options; let's try DNS SRV records
- string srv_name = conf->get_val<std::string>("mon_dns_srv_name");
+ string srv_name = conf.get_val<std::string>("mon_dns_srv_name");
string domain;
// check if domain is also provided and extract it from srv_name
size_t idx = srv_name.find("_");
lock("Monitor::lock"),
timer(cct_, lock),
finisher(cct_, "mon_finisher", "fin"),
- cpu_tp(cct, "Monitor::cpu_tp", "cpu_tp", g_conf->mon_cpu_threads),
+ cpu_tp(cct, "Monitor::cpu_tp", "cpu_tp", g_conf()->mon_cpu_threads),
has_ever_joined(false),
logger(NULL), cluster_logger(NULL), cluster_logger_registered(false),
monmap(map),
paxos_service(PAXOS_NUM),
admin_hook(NULL),
routed_request_tid(0),
- op_tracker(cct, g_conf->get_val<bool>("mon_enable_op_tracker"), 1)
+ op_tracker(cct, g_conf().get_val<bool>("mon_enable_op_tracker"), 1)
{
clog = log_client.create_channel(CLOG_CHANNEL_CLUSTER);
audit_clog = log_client.create_channel(CLOG_CHANNEL_AUDIT);
update_log_clients();
op_tracker.set_complaint_and_threshold(
- g_conf->get_val<std::chrono::seconds>("mon_op_complaint_time").count(),
- g_conf->get_val<int64_t>("mon_op_log_threshold"));
+ g_conf().get_val<std::chrono::seconds>("mon_op_complaint_time").count(),
+ g_conf().get_val<int64_t>("mon_op_log_threshold"));
op_tracker.set_history_size_and_duration(
- g_conf->get_val<uint64_t>("mon_op_history_size"),
- g_conf->get_val<std::chrono::seconds>("mon_op_history_duration").count());
+ g_conf().get_val<uint64_t>("mon_op_history_size"),
+ g_conf().get_val<std::chrono::seconds>("mon_op_history_duration").count());
op_tracker.set_history_slow_op_size_and_threshold(
- g_conf->get_val<uint64_t>("mon_op_history_slow_op_size"),
- g_conf->get_val<std::chrono::seconds>("mon_op_history_slow_op_threshold").count());
+ g_conf().get_val<uint64_t>("mon_op_history_slow_op_size"),
+ g_conf().get_val<std::chrono::seconds>("mon_op_history_slow_op_threshold").count());
paxos = new Paxos(this, "paxos");
return KEYS;
}
-void Monitor::handle_conf_change(const md_config_t *conf,
+void Monitor::handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed)
{
sanitize_options();
}
if (changed.count("mon_scrub_interval")) {
scrub_update_interval(conf->mon_scrub_interval);
}
}
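// Illustrative sketch (not part of the change itself): assuming the proxy type
// is named ConfigProxy, a minimal observer under the converted interface looks
// roughly like this (tracked keys here are only an example):
//
//   const char **get_tracked_conf_keys() const override {
//     static const char *KEYS[] = { "mon_scrub_interval", nullptr };
//     return KEYS;
//   }
//   void handle_conf_change(const ConfigProxy& conf,
//                           const std::set<std::string>& changed) override {
//     if (changed.count("mon_scrub_interval"))
//       scrub_update_interval(conf->mon_scrub_interval);
//   }
//
// Registration and removal go through the proxy as elsewhere in this change:
// g_conf().add_observer(this) on init, g_conf().remove_observer(this) on
// shutdown.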
// mon_lease must be greater than mon_lease_renewal; otherwise we
// may end up with leases expiring before they are renewed.
- if (g_conf->mon_lease_renew_interval_factor >= 1.0) {
+ if (g_conf()->mon_lease_renew_interval_factor >= 1.0) {
clog->error() << "mon_lease_renew_interval_factor ("
- << g_conf->mon_lease_renew_interval_factor
+ << g_conf()->mon_lease_renew_interval_factor
<< ") must be less than 1.0";
r = -EINVAL;
}
// with the same value, for a given small value, could mean timing out if
// the monitors happened to be overloaded -- or even under normal load for
// a small enough value.
- if (g_conf->mon_lease_ack_timeout_factor <= 1.0) {
+ if (g_conf()->mon_lease_ack_timeout_factor <= 1.0) {
clog->error() << "mon_lease_ack_timeout_factor ("
- << g_conf->mon_lease_ack_timeout_factor
+ << g_conf()->mon_lease_ack_timeout_factor
<< ") must be greater than 1.0";
r = -EINVAL;
}
if (!has_ever_joined) {
// impose initial quorum restrictions?
list<string> initial_members;
- get_str_list(g_conf->mon_initial_members, initial_members);
+ get_str_list(g_conf()->mon_initial_members, initial_members);
if (!initial_members.empty()) {
dout(1) << " initial_members " << initial_members << ", filtering seed monmap" << dendl;
} else if (!monmap->contains(name)) {
derr << "not in monmap and have been in a quorum before; "
<< "must have been removed" << dendl;
- if (g_conf->mon_force_quorum_join) {
+ if (g_conf()->mon_force_quorum_join) {
dout(0) << "we should have died but "
<< "'mon_force_quorum_join' is set -- allowing boot" << dendl;
} else {
}
}
- string keyring_loc = g_conf->mon_data + "/keyring";
+ string keyring_loc = g_conf()->mon_data + "/keyring";
r = keyring.load(cct, keyring_loc);
if (r < 0) {
keyring.encode_plaintext(bl);
write_default_keyring(bl);
} else {
- derr << "unable to load initial keyring " << g_conf->keyring << dendl;
+ derr << "unable to load initial keyring " << g_conf()->keyring << dendl;
lock.Unlock();
return r;
}
lock.Lock();
// add ourselves as a conf observer
- g_conf->add_observer(this);
+ g_conf().add_observer(this);
lock.Unlock();
return 0;
state = STATE_SHUTDOWN;
- g_conf->remove_observer(this);
+ g_conf().remove_observer(this);
if (admin_hook) {
cct->get_admin_socket()->unregister_commands(admin_hook);
_reset();
// sync store
- if (g_conf->mon_compact_on_bootstrap) {
+ if (g_conf()->mon_compact_on_bootstrap) {
dout(10) << "bootstrap -- triggering compaction" << dendl;
store->compact();
dout(10) << "bootstrap -- finished compaction" << dendl;
store->apply_transaction(t);
- assert(g_conf->mon_sync_requester_kill_at != 1);
+ assert(g_conf()->mon_sync_requester_kill_at != 1);
// clear the underlying store
set<string> targets = get_sync_targets_names();
// deciding a partial or no sync is needed.
paxos->init();
- assert(g_conf->mon_sync_requester_kill_at != 2);
+ assert(g_conf()->mon_sync_requester_kill_at != 2);
}
// assume 'other' as the leader. We will update the leader once we receive
if (sync_timeout_event)
timer.cancel_event(sync_timeout_event);
sync_timeout_event = timer.add_event_after(
- g_conf->mon_sync_timeout,
+ g_conf()->mon_sync_timeout,
new C_MonContext(this, [this](int) {
sync_timeout();
}));
{
dout(10) << __func__ << " lc " << last_committed << " from " << sync_provider << dendl;
- assert(g_conf->mon_sync_requester_kill_at != 7);
+ assert(g_conf()->mon_sync_requester_kill_at != 7);
if (sync_full) {
// finalize the paxos commits
store->apply_transaction(tx);
}
- assert(g_conf->mon_sync_requester_kill_at != 8);
+ assert(g_conf()->mon_sync_requester_kill_at != 8);
auto t(std::make_shared<MonitorDBStore::Transaction>());
t->erase("mon_sync", "in_sync");
t->erase("mon_sync", "last_committed_floor");
store->apply_transaction(t);
- assert(g_conf->mon_sync_requester_kill_at != 9);
+ assert(g_conf()->mon_sync_requester_kill_at != 9);
init_paxos();
- assert(g_conf->mon_sync_requester_kill_at != 10);
+ assert(g_conf()->mon_sync_requester_kill_at != 10);
bootstrap();
}
return;
}
- assert(g_conf->mon_sync_provider_kill_at != 1);
+ assert(g_conf()->mon_sync_provider_kill_at != 1);
// make sure they can understand us.
if ((required_features ^ m->get_connection()->get_features()) &
SyncProvider& sp = sync_providers[cookie];
sp.cookie = cookie;
sp.entity = m->get_source_inst();
- sp.reset_timeout(g_ceph_context, g_conf->mon_sync_timeout * 2);
+ sp.reset_timeout(g_ceph_context, g_conf()->mon_sync_timeout * 2);
set<string> sync_targets;
if (m->op == MMonSync::OP_GET_COOKIE_FULL) {
return;
}
- assert(g_conf->mon_sync_provider_kill_at != 2);
+ assert(g_conf()->mon_sync_provider_kill_at != 2);
SyncProvider& sp = sync_providers[m->cookie];
- sp.reset_timeout(g_ceph_context, g_conf->mon_sync_timeout * 2);
+ sp.reset_timeout(g_ceph_context, g_conf()->mon_sync_timeout * 2);
if (sp.last_committed < paxos->get_first_committed() &&
paxos->get_first_committed() > 1) {
MMonSync *reply = new MMonSync(MMonSync::OP_CHUNK, sp.cookie);
auto tx(std::make_shared<MonitorDBStore::Transaction>());
- int left = g_conf->mon_sync_max_payload_size;
+ int left = g_conf()->mon_sync_max_payload_size;
while (sp.last_committed < paxos->get_version() && left > 0) {
bufferlist bl;
sp.last_committed++;
<< " key " << sp.last_key << dendl;
reply->op = MMonSync::OP_LAST_CHUNK;
- assert(g_conf->mon_sync_provider_kill_at != 3);
+ assert(g_conf()->mon_sync_provider_kill_at != 3);
// clean up our local state
sync_providers.erase(sp.cookie);
sync_reset_timeout();
sync_get_next_chunk();
- assert(g_conf->mon_sync_requester_kill_at != 3);
+ assert(g_conf()->mon_sync_requester_kill_at != 3);
}
void Monitor::sync_get_next_chunk()
{
dout(20) << __func__ << " cookie " << sync_cookie << " provider " << sync_provider << dendl;
- if (g_conf->mon_inject_sync_get_chunk_delay > 0) {
- dout(20) << __func__ << " injecting delay of " << g_conf->mon_inject_sync_get_chunk_delay << dendl;
- usleep((long long)(g_conf->mon_inject_sync_get_chunk_delay * 1000000.0));
+ if (g_conf()->mon_inject_sync_get_chunk_delay > 0) {
+ dout(20) << __func__ << " injecting delay of " << g_conf()->mon_inject_sync_get_chunk_delay << dendl;
+ usleep((long long)(g_conf()->mon_inject_sync_get_chunk_delay * 1000000.0));
}
MMonSync *r = new MMonSync(MMonSync::OP_GET_CHUNK, sync_cookie);
messenger->send_message(r, sync_provider);
- assert(g_conf->mon_sync_requester_kill_at != 4);
+ assert(g_conf()->mon_sync_requester_kill_at != 4);
}
void Monitor::handle_sync_chunk(MonOpRequestRef op)
}
assert(state == STATE_SYNCHRONIZING);
- assert(g_conf->mon_sync_requester_kill_at != 5);
+ assert(g_conf()->mon_sync_requester_kill_at != 5);
auto tx(std::make_shared<MonitorDBStore::Transaction>());
tx->append_from_encoded(m->chunk_bl);
store->apply_transaction(tx);
- assert(g_conf->mon_sync_requester_kill_at != 6);
+ assert(g_conf()->mon_sync_requester_kill_at != 6);
if (!sync_full) {
dout(10) << __func__ << " applying recent paxos transactions as we go" << dendl;
probe_timeout_event = new C_MonContext(this, [this](int r) {
probe_timeout(r);
});
- double t = g_conf->mon_probe_timeout;
+ double t = g_conf()->mon_probe_timeout;
if (timer.add_event_after(t, probe_timeout_event)) {
dout(10) << "reset_probe_timeout " << probe_timeout_event
<< " after " << t << " seconds" << dendl;
sync_start(other, true);
return;
}
- if (paxos->get_version() + g_conf->paxos_max_join_drift < m->paxos_last_version) {
+ if (paxos->get_version() + g_conf()->paxos_max_join_drift < m->paxos_last_version) {
dout(10) << " peer paxos last version " << m->paxos_last_version
<< " vs my version " << paxos->get_version()
<< " (too far ahead)"
f->close_section();
}
- if (g_conf->mon_sync_provider_kill_at > 0)
- f->dump_int("provider_kill_at", g_conf->mon_sync_provider_kill_at);
- if (g_conf->mon_sync_requester_kill_at > 0)
- f->dump_int("requester_kill_at", g_conf->mon_sync_requester_kill_at);
+ if (g_conf()->mon_sync_provider_kill_at > 0)
+ f->dump_int("provider_kill_at", g_conf()->mon_sync_provider_kill_at);
+ if (g_conf()->mon_sync_requester_kill_at > 0)
+ f->dump_int("requester_kill_at", g_conf()->mon_sync_requester_kill_at);
f->open_object_section("monmap");
monmap->dump(f);
const char *sep2)
{
health_status_t r = HEALTH_OK;
- bool compat = g_conf->mon_health_preluminous_compat;
- bool compat_warn = g_conf->get_val<bool>("mon_health_preluminous_compat_warning");
+ bool compat = g_conf()->mon_health_preluminous_compat;
+ bool compat_warn = g_conf().get_val<bool>("mon_health_preluminous_compat_warning");
if (f) {
f->open_object_section("health");
f->open_object_section("checks");
const health_check_map_t& previous,
MonitorDBStore::TransactionRef t)
{
- if (!g_conf->mon_health_to_clog) {
+ if (!g_conf()->mon_health_to_clog) {
return;
}
dout(10) << __func__ << " updated " << updated.checks.size()
<< " previous " << previous.checks.size()
<< dendl;
- const auto min_log_period = g_conf->get_val<int64_t>(
+ const auto min_log_period = g_conf().get_val<int64_t>(
"mon_health_log_update_period");
for (auto& p : updated.checks) {
auto q = previous.checks.find(p.first);
bool Monitor::is_keyring_required()
{
- string auth_cluster_required = g_conf->auth_supported.empty() ?
- g_conf->auth_cluster_required : g_conf->auth_supported;
- string auth_service_required = g_conf->auth_supported.empty() ?
- g_conf->auth_service_required : g_conf->auth_supported;
+ string auth_cluster_required = g_conf()->auth_supported.empty() ?
+ g_conf()->auth_cluster_required : g_conf()->auth_supported;
+ string auth_service_required = g_conf()->auth_supported.empty() ?
+ g_conf()->auth_service_required : g_conf()->auth_supported;
return auth_service_required == "cephx" ||
auth_cluster_required == "cephx";
if (mon_cmd->is_mgr()) {
const auto& hdr = m->get_header();
uint64_t size = hdr.front_len + hdr.middle_len + hdr.data_len;
- uint64_t max = g_conf->get_val<uint64_t>("mon_client_bytes")
- * g_conf->get_val<double>("mon_mgr_proxy_client_bytes_ratio");
+ uint64_t max = g_conf().get_val<uint64_t>("mon_client_bytes")
+ * g_conf().get_val<double>("mon_mgr_proxy_client_bytes_ratio");
if (mgr_proxy_bytes + size > max) {
dout(10) << __func__ << " current mgr proxy bytes " << mgr_proxy_bytes
<< " + " << size << " > max " << max << dendl;
double duration = std::chrono::duration<double>(end-start).count();
dout(1) << "finished manual compaction in " << duration << " seconds" << dendl;
ostringstream oss;
- oss << "compacted " << g_conf->get_val<std::string>("mon_keyvaluedb") << " in " << duration << " seconds";
+ oss << "compacted " << g_conf().get_val<std::string>("mon_keyvaluedb") << " in " << duration << " seconds";
rs = oss.str();
r = 0;
}
if (!injected_args.empty()) {
dout(0) << "parsing injected options '" << injected_args << "'" << dendl;
ostringstream oss;
- r = g_conf->injectargs(str_join(injected_args, " "), &oss);
+ r = g_conf().injectargs(str_join(injected_args, " "), &oss);
ss << "injectargs:" << oss.str();
rs = ss.str();
goto out;
cmd_getval(cct, cmdmap, "key", key);
std::string val;
cmd_getval(cct, cmdmap, "value", val);
- r = g_conf->set_val(key, val, &ss);
+ r = g_conf().set_val(key, val, &ss);
if (r == 0) {
- g_conf->apply_changes(nullptr);
+ g_conf().apply_changes(nullptr);
}
rs = ss.str();
goto out;
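// Illustrative sketch (not part of the change itself): the "config set" /
// injectargs path above shows the runtime-override flow on the proxy:
// set_val() stages the value and apply_changes() re-runs registered observers.
// Roughly (key and value are only an example):
//
//   std::stringstream ss;
//   if (g_conf().set_val("mon_scrub_interval", "3600", &ss) == 0)
//     g_conf().apply_changes(nullptr);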
assert(s);
s->session_timeout = ceph_clock_now();
- s->session_timeout += g_conf->mon_session_timeout;
+ s->session_timeout += g_conf()->mon_session_timeout;
if (s->auth_handler) {
s->entity_name = s->auth_handler->get_entity_name();
if (timecheck_round % 2) {
dout(10) << __func__ << " there's a timecheck going on" << dendl;
utime_t curr_time = ceph_clock_now();
- double max = g_conf->mon_timecheck_interval*3;
+ double max = g_conf()->mon_timecheck_interval*3;
if (curr_time - timecheck_round_start < max) {
dout(10) << __func__ << " keep current round going" << dendl;
goto out;
if (timecheck_has_skew(skew_bound, &abs_skew)) {
status = HEALTH_WARN;
ss << "clock skew " << abs_skew << "s"
- << " > max " << g_conf->mon_clock_drift_allowed << "s";
+ << " > max " << g_conf()->mon_clock_drift_allowed << "s";
}
return status;
ConnectionRef con = m->get_connection();
if (!con->has_feature(CEPH_FEATURE_MON_STATEFUL_SUB))
m->get_connection()->send_message(new MMonSubscribeAck(
- monmap->get_fsid(), (int)g_conf->mon_subscribe_interval));
+ monmap->get_fsid(), (int)g_conf()->mon_subscribe_interval));
}
}
dout(15) << __func__ << " reset timeout event" << dendl;
scrub_cancel_timeout();
scrub_timeout_event = timer.add_event_after(
- g_conf->mon_scrub_timeout,
+ g_conf()->mon_scrub_timeout,
new C_MonContext(this, [this](int) {
scrub_timeout();
}));
/************ TICK ***************/
void Monitor::new_tick()
{
- timer.add_event_after(g_conf->mon_tick_interval, new C_MonContext(this, [this](int) {
+ timer.add_event_after(g_conf()->mon_tick_interval, new C_MonContext(this, [this](int) {
tick();
}));
}
// Check if we need to emit any delayed health check updated messages
if (is_leader()) {
- const auto min_period = g_conf->get_val<int64_t>(
+ const auto min_period = g_conf().get_val<int64_t>(
"mon_health_log_update_period");
for (auto& svc : paxos_service) {
auto health = svc->get_health_checks();
auto p = session_map.sessions.begin();
bool out_for_too_long = (!exited_quorum.is_zero() &&
- now > (exited_quorum + 2*g_conf->mon_lease));
+ now > (exited_quorum + 2*g_conf()->mon_lease));
while (!p.end()) {
MonSession *s = *p;
if (s->session_timeout < now && s->con) {
// check keepalive, too
s->session_timeout = s->con->get_last_keepalive();
- s->session_timeout += g_conf->mon_session_timeout;
+ s->session_timeout += g_conf()->mon_session_timeout;
}
if (s->session_timeout < now) {
dout(10) << " trimming session " << s->con << " " << s->name
utime_t oldest_secs;
const utime_t now = ceph_clock_now();
auto too_old = now;
- too_old -= g_conf->get_val<std::chrono::seconds>("mon_op_complaint_time").count();
+ too_old -= g_conf().get_val<std::chrono::seconds>("mon_op_complaint_time").count();
int slow = 0;
TrackedOpRef oldest_op;
auto count_slow_ops = [&](TrackedOp& op) {
KeyRing keyring;
string keyring_filename;
- r = ceph_resolve_file_search(g_conf->keyring, keyring_filename);
+ r = ceph_resolve_file_search(g_conf()->keyring, keyring_filename);
if (r) {
- derr << "unable to find a keyring file on " << g_conf->keyring
+ derr << "unable to find a keyring file on " << g_conf()->keyring
<< ": " << cpp_strerror(r) << dendl;
- if (g_conf->key != "") {
- string keyring_plaintext = "[mon.]\n\tkey = " + g_conf->key +
+ if (g_conf()->key != "") {
+ string keyring_plaintext = "[mon.]\n\tkey = " + g_conf()->key +
"\n\tcaps mon = \"allow *\"\n";
bufferlist bl;
bl.append(keyring_plaintext);
} else {
r = keyring.load(g_ceph_context, keyring_filename);
if (r < 0) {
- derr << "unable to load initial keyring " << g_conf->keyring << dendl;
+ derr << "unable to load initial keyring " << g_conf()->keyring << dendl;
return r;
}
}
int Monitor::write_default_keyring(bufferlist& bl)
{
ostringstream os;
- os << g_conf->mon_data << "/keyring";
+ os << g_conf()->mon_data << "/keyring";
int err = 0;
int fd = ::open(os.str().c_str(), O_WRONLY|O_CREAT, 0600);
double abs_skew = std::fabs(skew_bound);
if (abs)
*abs = abs_skew;
- return (abs_skew > g_conf->mon_clock_drift_allowed);
+ return (abs_skew > g_conf()->mon_clock_drift_allowed);
}
/**
KeyValueDB::Transaction dbt = db->get_transaction();
if (do_dump) {
- if (!g_conf->mon_debug_dump_json) {
+ if (!g_conf()->mon_debug_dump_json) {
bufferlist bl;
t->encode(bl);
bl.write_fd(dump_fd_binary);
* We will now randomly inject random delays. We can safely sleep prior
* to applying the transaction as it won't break the model.
*/
- double delay_prob = g_conf->mon_inject_transaction_delay_probability;
+ double delay_prob = g_conf()->mon_inject_transaction_delay_probability;
if (delay_prob && (rand() % 10000 < delay_prob * 10000.0)) {
utime_t delay;
- double delay_max = g_conf->mon_inject_transaction_delay_max;
+ double delay_max = g_conf()->mon_inject_transaction_delay_max;
delay.set_from_double(delay_max * (double)(rand() % 10000) / 10000.0);
lsubdout(g_ceph_context, mon, 1)
<< "apply_transaction will be delayed for " << delay
last_key.first = prefix;
last_key.second = key;
- if (g_conf->mon_sync_debug) {
+ if (g_conf()->mon_sync_debug) {
encode(prefix, crc_bl);
encode(key, crc_bl);
encode(value, crc_bl);
public:
__u32 crc() {
- if (g_conf->mon_sync_debug)
+ if (g_conf()->mon_sync_debug)
return crc_bl.crc32c(0);
return 0;
}
}
db.reset(db_ptr);
- if (g_conf->mon_debug_dump_transactions) {
- if (!g_conf->mon_debug_dump_json) {
+ if (g_conf()->mon_debug_dump_transactions) {
+ if (!g_conf()->mon_debug_dump_json) {
dump_fd_binary = ::open(
- g_conf->mon_debug_dump_location.c_str(),
+ g_conf()->mon_debug_dump_location.c_str(),
O_CREAT|O_APPEND|O_WRONLY, 0644);
if (dump_fd_binary < 0) {
dump_fd_binary = -errno;
} else {
dump_fmt.reset();
dump_fmt.open_array_section("dump");
- dump_fd_json.open(g_conf->mon_debug_dump_location.c_str());
+ dump_fd_json.open(g_conf()->mon_debug_dump_location.c_str());
}
do_dump = true;
}
if (kv_type == "rocksdb")
- db->init(g_conf->mon_rocksdb_options);
+ db->init(g_conf()->mon_rocksdb_options);
else
db->init();
string kv_type;
int r = read_meta("kv_backend", &kv_type);
if (r < 0) {
- kv_type = g_conf->mon_keyvaluedb;
+ kv_type = g_conf()->mon_keyvaluedb;
r = write_meta("kv_backend", kv_type);
if (r < 0)
return r;
~MonitorDBStore() {
assert(!is_open);
if (do_dump) {
- if (!g_conf->mon_debug_dump_json) {
+ if (!g_conf()->mon_debug_dump_json) {
::close(dump_fd_binary);
} else {
dump_fmt.close_section();
pending_map = *mon->monmap;
pending_map.epoch = 1;
- if (g_conf->mon_debug_no_initial_persistent_features) {
+ if (g_conf()->mon_debug_no_initial_persistent_features) {
derr << __func__ << " mon_debug_no_initial_persistent_features=true"
<< dendl;
} else {
const string& service_name)
: PaxosService(mn, p, service_name),
cct(cct),
- inc_osd_cache(g_conf->mon_osd_cache_size),
- full_osd_cache(g_conf->mon_osd_cache_size),
+ inc_osd_cache(g_conf()->mon_osd_cache_size),
+ full_osd_cache(g_conf()->mon_osd_cache_size),
has_osdmap_manifest(false),
mapper(mn->cct, &mn->cpu_tp)
{}
newmap.flags |=
CEPH_OSDMAP_RECOVERY_DELETES |
CEPH_OSDMAP_PURGED_SNAPDIRS;
- newmap.full_ratio = g_conf->mon_osd_full_ratio;
+ newmap.full_ratio = g_conf()->mon_osd_full_ratio;
if (newmap.full_ratio > 1.0) newmap.full_ratio /= 100;
- newmap.backfillfull_ratio = g_conf->mon_osd_backfillfull_ratio;
+ newmap.backfillfull_ratio = g_conf()->mon_osd_backfillfull_ratio;
if (newmap.backfillfull_ratio > 1.0) newmap.backfillfull_ratio /= 100;
- newmap.nearfull_ratio = g_conf->mon_osd_nearfull_ratio;
+ newmap.nearfull_ratio = g_conf()->mon_osd_nearfull_ratio;
if (newmap.nearfull_ratio > 1.0) newmap.nearfull_ratio /= 100;
// new cluster should require latest by default
- if (g_conf->get_val<bool>("mon_debug_no_require_nautilus")) {
- if (g_conf->mon_debug_no_require_mimic) {
+ if (g_conf().get_val<bool>("mon_debug_no_require_nautilus")) {
+ if (g_conf()->mon_debug_no_require_mimic) {
derr << __func__ << " mon_debug_no_require_mimic=true and nautilus=true" << dendl;
newmap.require_osd_release = CEPH_RELEASE_LUMINOUS;
} else {
} else {
newmap.require_osd_release = CEPH_RELEASE_NAUTILUS;
int r = ceph_release_from_name(
- g_conf->mon_osd_initial_require_min_compat_client.c_str());
+ g_conf()->mon_osd_initial_require_min_compat_client.c_str());
if (r <= 0) {
assert(0 == "mon_osd_initial_require_min_compat_client is not valid");
}
t->erase("mkfs", "osdmap");
}
- if (tx_size > g_conf->mon_sync_max_payload_size*2) {
+ if (tx_size > g_conf()->mon_sync_max_payload_size*2) {
mon->store->apply_transaction(t);
t = MonitorDBStore::TransactionRef();
tx_size = 0;
if (!osdmap.get_pools().empty()) {
auto fin = new C_UpdateCreatingPGs(this, osdmap.get_epoch());
mapping_job = mapping.start_update(osdmap, mapper,
- g_conf->mon_osd_mapping_pgs_per_chunk);
+ g_conf()->mon_osd_mapping_pgs_per_chunk);
dout(10) << __func__ << " started mapping job " << mapping_job.get()
<< " at " << fin->start << dendl;
mapping_job->set_finish_event(fin);
// safety checks (this shouldn't really happen)
{
if (osdmap.backfillfull_ratio <= 0) {
- pending_inc.new_backfillfull_ratio = g_conf->mon_osd_backfillfull_ratio;
+ pending_inc.new_backfillfull_ratio = g_conf()->mon_osd_backfillfull_ratio;
if (pending_inc.new_backfillfull_ratio > 1.0)
pending_inc.new_backfillfull_ratio /= 100;
dout(1) << __func__ << " setting backfillfull_ratio = "
<< pending_inc.new_backfillfull_ratio << dendl;
}
if (osdmap.full_ratio <= 0) {
- pending_inc.new_full_ratio = g_conf->mon_osd_full_ratio;
+ pending_inc.new_full_ratio = g_conf()->mon_osd_full_ratio;
if (pending_inc.new_full_ratio > 1.0)
pending_inc.new_full_ratio /= 100;
dout(1) << __func__ << " setting full_ratio = "
<< pending_inc.new_full_ratio << dendl;
}
if (osdmap.nearfull_ratio <= 0) {
- pending_inc.new_nearfull_ratio = g_conf->mon_osd_nearfull_ratio;
+ pending_inc.new_nearfull_ratio = g_conf()->mon_osd_nearfull_ratio;
if (pending_inc.new_nearfull_ratio > 1.0)
pending_inc.new_nearfull_ratio /= 100;
dout(1) << __func__ << " setting nearfull_ratio = "
}
// process queue
- unsigned max = std::max<int64_t>(1, g_conf->mon_osd_max_creating_pgs);
+ unsigned max = std::max<int64_t>(1, g_conf()->mon_osd_max_creating_pgs);
const auto total = pending_creatings.pgs.size();
while (pending_creatings.pgs.size() < max &&
!pending_creatings.queue.empty()) {
unsigned estimate =
mapping.get_osd_acting_pgs(*osds.begin()).size() * osds.size();
if (estimate > mapping.get_num_pgs() *
- g_conf->mon_osd_prime_pg_temp_max_estimate) {
+ g_conf()->mon_osd_prime_pg_temp_max_estimate) {
dout(10) << __func__ << " estimate " << estimate << " pgs on "
<< osds.size() << " osds >= "
- << g_conf->mon_osd_prime_pg_temp_max_estimate << " of total "
+ << g_conf()->mon_osd_prime_pg_temp_max_estimate << " of total "
<< mapping.get_num_pgs() << " pgs, all"
<< dendl;
all = true;
dout(10) << __func__ << " no pools, no pg_temp priming" << dendl;
} else if (all) {
PrimeTempJob job(next, this);
- mapper.queue(&job, g_conf->mon_osd_mapping_pgs_per_chunk);
- if (job.wait_for(g_conf->mon_osd_prime_pg_temp_max_time)) {
+ mapper.queue(&job, g_conf()->mon_osd_mapping_pgs_per_chunk);
+ if (job.wait_for(g_conf()->mon_osd_prime_pg_temp_max_time)) {
dout(10) << __func__ << " done in " << job.get_duration() << dendl;
} else {
dout(10) << __func__ << " did not finish in "
- << g_conf->mon_osd_prime_pg_temp_max_time
+ << g_conf()->mon_osd_prime_pg_temp_max_time
<< ", stopping" << dendl;
job.abort();
}
} else {
dout(10) << __func__ << " " << osds.size() << " interesting osds" << dendl;
utime_t stop = ceph_clock_now();
- stop += g_conf->mon_osd_prime_pg_temp_max_time;
+ stop += g_conf()->mon_osd_prime_pg_temp_max_time;
const int chunk = 1000;
int n = chunk;
std::unordered_set<pg_t> did_pgs;
n = chunk;
if (ceph_clock_now() > stop) {
dout(10) << __func__ << " consumed more than "
- << g_conf->mon_osd_prime_pg_temp_max_time
+ << g_conf()->mon_osd_prime_pg_temp_max_time
<< " seconds, stopping"
<< dendl;
return;
<< mapping_job.get() << " is prior epoch "
<< mapping.get_epoch() << dendl;
} else {
- if (g_conf->mon_osd_prime_pg_temp) {
+ if (g_conf()->mon_osd_prime_pg_temp) {
maybe_prime_pg_temp();
}
}
- } else if (g_conf->mon_osd_prime_pg_temp) {
+ } else if (g_conf()->mon_osd_prime_pg_temp) {
dout(1) << __func__ << " skipping prime_pg_temp; mapping job did not start"
<< dendl;
}
}
}
- if (g_conf->get_val<bool>("mon_debug_block_osdmap_trim")) {
+ if (g_conf().get_val<bool>("mon_debug_block_osdmap_trim")) {
dout(0) << __func__
<< " blocking osdmap trim"
" ('mon_debug_block_osdmap_trim' set to 'true')"
{
epoch_t floor = get_min_last_epoch_clean();
dout(10) << " min_last_epoch_clean " << floor << dendl;
- if (g_conf->mon_osd_force_trim_to > 0 &&
- g_conf->mon_osd_force_trim_to < (int)get_last_committed()) {
- floor = g_conf->mon_osd_force_trim_to;
+ if (g_conf()->mon_osd_force_trim_to > 0 &&
+ g_conf()->mon_osd_force_trim_to < (int)get_last_committed()) {
+ floor = g_conf()->mon_osd_force_trim_to;
dout(10) << " explicit mon_osd_force_trim_to = " << floor << dendl;
}
- unsigned min = g_conf->mon_min_osdmap_epochs;
+ unsigned min = g_conf()->mon_min_osdmap_epochs;
if (floor + min > get_last_committed()) {
if (min < get_last_committed())
floor = get_last_committed() - min;
version_t first = get_first_committed();
version_t last = get_last_committed();
version_t min_osdmap_epochs =
- g_conf->get_val<int64_t>("mon_min_osdmap_epochs");
+ g_conf().get_val<int64_t>("mon_min_osdmap_epochs");
version_t prune_min =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_min");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_min");
version_t prune_interval =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_interval");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
version_t last_pinned = osdmap_manifest.get_last_pinned();
version_t last_to_pin = last - min_osdmap_epochs;
bool OSDMonitor::_prune_sanitize_options() const
{
uint64_t prune_interval =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_interval");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
uint64_t prune_min =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_min");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_min");
uint64_t txsize =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_txsize");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_txsize");
bool r = true;
}
bool OSDMonitor::is_prune_enabled() const {
- return g_conf->get_val<bool>("mon_osdmap_full_prune_enabled");
+ return g_conf().get_val<bool>("mon_osdmap_full_prune_enabled");
}
bool OSDMonitor::is_prune_supported() const {
version_t first = get_first_committed();
version_t last = get_last_committed();
- version_t last_to_pin = last - g_conf->mon_min_osdmap_epochs;
+ version_t last_to_pin = last - g_conf()->mon_min_osdmap_epochs;
version_t last_pinned = osdmap_manifest.get_last_pinned();
uint64_t prune_interval =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_interval");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_interval");
uint64_t txsize =
- g_conf->get_val<uint64_t>("mon_osdmap_full_prune_txsize");
+ g_conf().get_val<uint64_t>("mon_osdmap_full_prune_txsize");
prune_init();
MOSDMap *reply = new MOSDMap(mon->monmap->fsid, features);
epoch_t first = get_first_committed();
epoch_t last = osdmap.get_epoch();
- int max = g_conf->osd_map_message_max;
+ int max = g_conf()->osd_map_message_max;
for (epoch_t e = std::max(first, m->get_full_first());
e <= std::min(last, m->get_full_last()) && max > 0;
++e, --max) {
}
int up = osdmap.get_num_up_osds() - pending_inc.get_net_marked_down(&osdmap);
float up_ratio = (float)up / (float)num_osds;
- if (up_ratio < g_conf->mon_osd_min_up_ratio) {
+ if (up_ratio < g_conf()->mon_osd_min_up_ratio) {
dout(2) << __func__ << " current up_ratio " << up_ratio << " < min "
- << g_conf->mon_osd_min_up_ratio
+ << g_conf()->mon_osd_min_up_ratio
<< ", will not mark osd." << i << " down" << dendl;
return false;
}
}
int in = osdmap.get_num_in_osds() - pending_inc.get_net_marked_out(&osdmap);
float in_ratio = (float)in / (float)num_osds;
- if (in_ratio < g_conf->mon_osd_min_in_ratio) {
+ if (in_ratio < g_conf()->mon_osd_min_in_ratio) {
if (i >= 0)
dout(5) << __func__ << " current in_ratio " << in_ratio << " < min "
- << g_conf->mon_osd_min_in_ratio
+ << g_conf()->mon_osd_min_in_ratio
<< ", will not mark osd." << i << " out" << dendl;
else
dout(5) << __func__ << " current in_ratio " << in_ratio << " < min "
- << g_conf->mon_osd_min_in_ratio
+ << g_conf()->mon_osd_min_in_ratio
<< ", will not mark osds out" << dendl;
return false;
}
}
set<string> reporters_by_subtree;
- auto reporter_subtree_level = g_conf->get_val<string>("mon_osd_reporter_subtree_level");
- utime_t orig_grace(g_conf->osd_heartbeat_grace, 0);
+ auto reporter_subtree_level = g_conf().get_val<string>("mon_osd_reporter_subtree_level");
+ utime_t orig_grace(g_conf()->osd_heartbeat_grace, 0);
utime_t max_failed_since = fi.get_failed_since();
utime_t failed_for = now - max_failed_since;
utime_t grace = orig_grace;
double my_grace = 0, peer_grace = 0;
double decay_k = 0;
- if (g_conf->mon_osd_adjust_heartbeat_grace) {
- double halflife = (double)g_conf->mon_osd_laggy_halflife;
+ if (g_conf()->mon_osd_adjust_heartbeat_grace) {
+ double halflife = (double)g_conf()->mon_osd_laggy_halflife;
decay_k = ::log(.5) / halflife;
// scale grace period based on historical probability of 'lagginess'
} else {
reporters_by_subtree.insert(iter->second);
}
- if (g_conf->mon_osd_adjust_heartbeat_grace) {
+ if (g_conf()->mon_osd_adjust_heartbeat_grace) {
const osd_xinfo_t& xi = osdmap.get_xinfo(p->first);
utime_t elapsed = now - xi.down_stamp;
double decay = exp((double)elapsed * decay_k);
}
}
- if (g_conf->mon_osd_adjust_heartbeat_grace) {
+ if (g_conf()->mon_osd_adjust_heartbeat_grace) {
peer_grace /= (double)fi.reporters.size();
grace += peer_grace;
}
<< dendl;
if (failed_for >= grace &&
- reporters_by_subtree.size() >= g_conf->get_val<uint64_t>("mon_osd_min_down_reporters")) {
+ reporters_by_subtree.size() >= g_conf().get_val<uint64_t>("mon_osd_min_down_reporters")) {
dout(1) << " we have enough reporters to mark osd." << target_osd
<< " down" << dendl;
pending_inc.new_state[target_osd] = CEPH_OSD_UP;
osd_xinfo_t xi = osdmap.get_xinfo(from);
if (m->boot_epoch == 0) {
- xi.laggy_probability *= (1.0 - g_conf->mon_osd_laggy_weight);
- xi.laggy_interval *= (1.0 - g_conf->mon_osd_laggy_weight);
+ xi.laggy_probability *= (1.0 - g_conf()->mon_osd_laggy_weight);
+ xi.laggy_interval *= (1.0 - g_conf()->mon_osd_laggy_weight);
dout(10) << " not laggy, new xi " << xi << dendl;
} else {
if (xi.down_stamp.sec()) {
int interval = ceph_clock_now().sec() -
xi.down_stamp.sec();
- if (g_conf->mon_osd_laggy_max_interval &&
- (interval > g_conf->mon_osd_laggy_max_interval)) {
- interval = g_conf->mon_osd_laggy_max_interval;
+ if (g_conf()->mon_osd_laggy_max_interval &&
+ (interval > g_conf()->mon_osd_laggy_max_interval)) {
+ interval = g_conf()->mon_osd_laggy_max_interval;
}
xi.laggy_interval =
- interval * g_conf->mon_osd_laggy_weight +
- xi.laggy_interval * (1.0 - g_conf->mon_osd_laggy_weight);
+ interval * g_conf()->mon_osd_laggy_weight +
+ xi.laggy_interval * (1.0 - g_conf()->mon_osd_laggy_weight);
}
xi.laggy_probability =
- g_conf->mon_osd_laggy_weight +
- xi.laggy_probability * (1.0 - g_conf->mon_osd_laggy_weight);
+ g_conf()->mon_osd_laggy_weight +
+ xi.laggy_probability * (1.0 - g_conf()->mon_osd_laggy_weight);
dout(10) << " laggy, now xi " << xi << dendl;
}
xi.features = m->get_connection()->get_features();
// mark in?
- if ((g_conf->mon_osd_auto_mark_auto_out_in &&
+ if ((g_conf()->mon_osd_auto_mark_auto_out_in &&
(oldstate & CEPH_OSD_AUTOOUT)) ||
- (g_conf->mon_osd_auto_mark_new_in && (oldstate & CEPH_OSD_NEW)) ||
- (g_conf->mon_osd_auto_mark_in)) {
+ (g_conf()->mon_osd_auto_mark_new_in && (oldstate & CEPH_OSD_NEW)) ||
+ (g_conf()->mon_osd_auto_mark_in)) {
if (can_mark_in(from)) {
if (osdmap.osd_xinfo[from].old_weight > 0) {
pending_inc.new_weight[from] = osdmap.osd_xinfo[from].old_weight;
}
while (first <= osdmap.get_epoch()) {
- epoch_t last = std::min<epoch_t>(first + g_conf->osd_map_message_max - 1,
+ epoch_t last = std::min<epoch_t>(first + g_conf()->osd_map_message_max - 1,
osdmap.get_epoch());
MOSDMap *m = build_incremental(first, last, features);
ceph_assert(err == 0);
// this block performs paranoid checks on map retrieval
- if (g_conf->get_val<bool>("mon_debug_extra_checks") &&
+ if (g_conf().get_val<bool>("mon_debug_extra_checks") &&
inc.full_crc != 0) {
uint64_t f = encode_features;
/* can_mark_out() checks if we can mark osds as being out. The -1 has no
* influence at all. The decision is made based on the ratio of "in" osds,
* and the function returns false if this ratio is lower than the minimum
- * ratio set by g_conf->mon_osd_min_in_ratio. So it's not really up to us.
+ * ratio set by g_conf()->mon_osd_min_in_ratio. So it's not really up to us.
*/
if (can_mark_out(-1)) {
set<int> down_cache; // quick cache of down subtrees
if (osdmap.is_down(o) &&
osdmap.is_in(o) &&
can_mark_out(o)) {
- utime_t orig_grace(g_conf->mon_osd_down_out_interval, 0);
+ utime_t orig_grace(g_conf()->mon_osd_down_out_interval, 0);
utime_t grace = orig_grace;
double my_grace = 0.0;
- if (g_conf->mon_osd_adjust_down_out_interval) {
+ if (g_conf()->mon_osd_adjust_down_out_interval) {
// scale grace period the same way we do the heartbeat grace.
const osd_xinfo_t& xi = osdmap.get_xinfo(o);
- double halflife = (double)g_conf->mon_osd_laggy_halflife;
+ double halflife = (double)g_conf()->mon_osd_laggy_halflife;
double decay_k = ::log(.5) / halflife;
double decay = exp((double)down * decay_k);
dout(20) << "osd." << o << " laggy halflife " << halflife << " decay_k " << decay_k
}
// is this an entire large subtree down?
- if (g_conf->mon_osd_down_out_subtree_limit.length()) {
- int type = osdmap.crush->get_type_id(g_conf->mon_osd_down_out_subtree_limit);
+ if (g_conf()->mon_osd_down_out_subtree_limit.length()) {
+ int type = osdmap.crush->get_type_id(g_conf()->mon_osd_down_out_subtree_limit);
if (type > 0) {
if (osdmap.containing_subtree_is_down(cct, o, type, &down_cache)) {
- dout(10) << "tick entire containing " << g_conf->mon_osd_down_out_subtree_limit
+ dout(10) << "tick entire containing " << g_conf()->mon_osd_down_out_subtree_limit
<< " subtree for osd." << o << " is down; resetting timer" << dendl;
// reset timer, too.
down_pending_out[o] = now;
}
bool down_out = !osdmap.is_destroyed(o) &&
- g_conf->mon_osd_down_out_interval > 0 && down.sec() >= grace;
+ g_conf()->mon_osd_down_out_interval > 0 && down.sec() >= grace;
bool destroyed_out = osdmap.is_destroyed(o) &&
- g_conf->mon_osd_destroyed_out_interval > 0 &&
+ g_conf()->mon_osd_destroyed_out_interval > 0 &&
// this is not precise enough as we did not make a note when this osd
// was marked as destroyed, but let's not bother with that
// complexity for now.
- down.sec() >= g_conf->mon_osd_destroyed_out_interval;
+ down.sec() >= g_conf()->mon_osd_destroyed_out_interval;
if (down_out || destroyed_out) {
dout(10) << "tick marking osd." << o << " OUT after " << down
<< " sec (target " << grace << " = " << orig_grace << " + " << my_grace << ")" << dendl;
bool OSDMonitor::handle_osd_timeouts(const utime_t &now,
std::map<int,utime_t> &last_osd_report)
{
- utime_t timeo(g_conf->mon_osd_report_timeout, 0);
+ utime_t timeo(g_conf()->mon_osd_report_timeout, 0);
if (now - mon->get_leader_since() < timeo) {
// We haven't been the leader for long enough to consider OSD timeouts
return false;
return false; // we already pruned for this epoch
}
- unsigned max_prune = cct->_conf->get_val<uint64_t>(
+ unsigned max_prune = cct->_conf.get_val<uint64_t>(
"mon_max_snap_prune_per_epoch");
if (!max_prune) {
max_prune = 100000;
ErasureCodeProfile::const_iterator plugin = profile.find("plugin");
check_legacy_ec_plugin(plugin->second, profilename);
int err = instance.factory(plugin->second,
- g_conf->get_val<std::string>("erasure_code_dir"),
+ g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, ss);
if (err) {
return err;
check_legacy_ec_plugin(plugin->second, erasure_code_profile);
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
return instance.factory(plugin->second,
- g_conf->get_val<std::string>("erasure_code_dir"),
+ g_conf().get_val<std::string>("erasure_code_dir"),
profile, erasure_code, ss);
}
map<string,string> *erasure_code_profile_map,
ostream *ss)
{
- int r = g_conf->with_val<string>("osd_pool_default_erasure_code_profile",
+ int r = g_conf().with_val<string>("osd_pool_default_erasure_code_profile",
get_json_str_map,
*ss,
erasure_code_profile_map,
int err = 0;
switch (pool_type) {
case pg_pool_t::TYPE_REPLICATED:
- *size = g_conf->get_val<uint64_t>("osd_pool_default_size");
- *min_size = g_conf->get_osd_pool_default_min_size();
+ *size = g_conf().get_val<uint64_t>("osd_pool_default_size");
+ *min_size = g_conf().get_osd_pool_default_min_size();
break;
case pg_pool_t::TYPE_ERASURE:
{
if (err)
break;
uint32_t data_chunks = erasure_code->get_data_chunk_count();
- uint32_t stripe_unit = g_conf->get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
+ uint32_t stripe_unit = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
auto it = profile.find("stripe_unit");
if (it != profile.end()) {
string err_str;
int OSDMonitor::check_pg_num(int64_t pool, int pg_num, int size, ostream *ss)
{
- auto max_pgs_per_osd = g_conf->get_val<uint64_t>("mon_max_pg_per_osd");
+ auto max_pgs_per_osd = g_conf().get_val<uint64_t>("mon_max_pg_per_osd");
auto num_osds = std::max(osdmap.get_num_in_osds(), 3u); // assume min cluster size 3
auto max_pgs = max_pgs_per_osd * num_osds;
uint64_t projected = 0;
if (name.length() == 0)
return -EINVAL;
if (pg_num == 0)
- pg_num = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
+ pg_num = g_conf().get_val<uint64_t>("osd_pool_default_pg_num");
if (pgp_num == 0)
- pgp_num = g_conf->get_val<uint64_t>("osd_pool_default_pgp_num");
- if (pg_num > g_conf->get_val<uint64_t>("mon_max_pool_pg_num")) {
+ pgp_num = g_conf().get_val<uint64_t>("osd_pool_default_pgp_num");
+ if (pg_num > g_conf().get_val<uint64_t>("mon_max_pool_pg_num")) {
*ss << "'pg_num' must be greater than 0 and less than or equal to "
- << g_conf->get_val<uint64_t>("mon_max_pool_pg_num")
+ << g_conf().get_val<uint64_t>("mon_max_pool_pg_num")
<< " (you may adjust 'mon max pool pg num' for higher values)";
return -ERANGE;
}
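The check above first substitutes osd_pool_default_pg_num/pgp_num for zero values and then rejects anything above mon_max_pool_pg_num; a compact sketch of that flow (default and limit values here are placeholders):

#include <cstdint>
#include <iostream>

// Illustrative: default-then-bound validation for pool PG counts.
// Returns 0 on success, -ERANGE (-34) when the request exceeds the cap.
int validate_pg_num_sketch(uint64_t& pg_num, uint64_t& pgp_num,
                           uint64_t default_pg, uint64_t default_pgp,
                           uint64_t max_pool_pg_num) {
  if (pg_num == 0)  pg_num = default_pg;
  if (pgp_num == 0) pgp_num = default_pgp;
  if (pg_num > max_pool_pg_num) return -34;  // -ERANGE
  return 0;
}

int main() {
  uint64_t pg = 0, pgp = 0;
  int r = validate_pg_num_sketch(pg, pgp, 32, 32, 65536);
  std::cout << r << " pg=" << pg << " pgp=" << pgp << "\n";  // 0 pg=32 pgp=32
}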
dout(10) << "prepare_pool_crush_rule returns " << r << dendl;
return r;
}
- if (g_conf->mon_osd_crush_smoke_test) {
+ if (g_conf()->mon_osd_crush_smoke_test) {
CrushWrapper newcrush;
_get_pending_crush(newcrush);
ostringstream err;
tester.set_max_x(50);
tester.set_rule(crush_rule);
auto start = ceph::coarse_mono_clock::now();
- r = tester.test_with_fork(g_conf->mon_lease);
+ r = tester.test_with_fork(g_conf()->mon_lease);
auto duration = ceph::coarse_mono_clock::now() - start;
if (r < 0) {
dout(10) << "tester.test_with_fork returns " << r
fread = true;
break;
case FAST_READ_DEFAULT:
- fread = g_conf->mon_osd_pool_ec_fast_read;
+ fread = g_conf()->mon_osd_pool_ec_fast_read;
break;
default:
*ss << "invalid fast_read setting: " << fast_read;
pi->create_time = ceph_clock_now();
pi->type = pool_type;
pi->fast_read = fread;
- pi->flags = g_conf->osd_pool_default_flags;
- if (g_conf->osd_pool_default_flag_hashpspool)
+ pi->flags = g_conf()->osd_pool_default_flags;
+ if (g_conf()->osd_pool_default_flag_hashpspool)
pi->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
- if (g_conf->osd_pool_default_flag_nodelete)
+ if (g_conf()->osd_pool_default_flag_nodelete)
pi->set_flag(pg_pool_t::FLAG_NODELETE);
- if (g_conf->osd_pool_default_flag_nopgchange)
+ if (g_conf()->osd_pool_default_flag_nopgchange)
pi->set_flag(pg_pool_t::FLAG_NOPGCHANGE);
- if (g_conf->osd_pool_default_flag_nosizechange)
+ if (g_conf()->osd_pool_default_flag_nosizechange)
pi->set_flag(pg_pool_t::FLAG_NOSIZECHANGE);
- if (g_conf->osd_pool_use_gmt_hitset)
+ if (g_conf()->osd_pool_use_gmt_hitset)
pi->use_gmt_hitset = true;
else
pi->use_gmt_hitset = false;
}
pi->stripe_width = stripe_width;
pi->cache_target_dirty_ratio_micro =
- g_conf->osd_pool_default_cache_target_dirty_ratio * 1000000;
+ g_conf()->osd_pool_default_cache_target_dirty_ratio * 1000000;
pi->cache_target_dirty_high_ratio_micro =
- g_conf->osd_pool_default_cache_target_dirty_high_ratio * 1000000;
+ g_conf()->osd_pool_default_cache_target_dirty_high_ratio * 1000000;
pi->cache_target_full_ratio_micro =
- g_conf->osd_pool_default_cache_target_full_ratio * 1000000;
- pi->cache_min_flush_age = g_conf->osd_pool_default_cache_min_flush_age;
- pi->cache_min_evict_age = g_conf->osd_pool_default_cache_min_evict_age;
+ g_conf()->osd_pool_default_cache_target_full_ratio * 1000000;
+ pi->cache_min_flush_age = g_conf()->osd_pool_default_cache_min_flush_age;
+ pi->cache_min_evict_age = g_conf()->osd_pool_default_cache_min_evict_age;
pending_inc.new_pool_names[pool] = name;
return 0;
}
return -EEXIST;
return 0;
}
- if (static_cast<uint64_t>(n) > g_conf->get_val<uint64_t>("mon_max_pool_pg_num")) {
+ if (static_cast<uint64_t>(n) > g_conf().get_val<uint64_t>("mon_max_pool_pg_num")) {
ss << "'pg_num' must be greater than 0 and less than or equal to "
- << g_conf->get_val<uint64_t>("mon_max_pool_pg_num")
+ << g_conf().get_val<uint64_t>("mon_max_pool_pg_num")
<< " (you may adjust 'mon max pool pg num' for higher values)";
return -ERANGE;
}
}
int expected_osds = std::min(p.get_pg_num(), osdmap.get_num_osds());
int64_t new_pgs = n - p.get_pg_num();
- if (new_pgs > g_conf->mon_osd_max_split_count * expected_osds) {
+ if (new_pgs > g_conf()->mon_osd_max_split_count * expected_osds) {
ss << "specified pg_num " << n << " is too large (creating "
<< new_pgs << " new PGs on ~" << expected_osds
<< " OSDs exceeds per-OSD max with mon_osd_max_split_count of "
- << g_conf->mon_osd_max_split_count << ')';
+ << g_conf()->mon_osd_max_split_count << ')';
return -E2BIG;
}
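The guard above limits how many new PGs a single pg_num increase may create per OSD expected to take part in the split; the same arithmetic as a standalone sketch with made-up numbers:

#include <algorithm>
#include <iostream>

// Illustrative: allow the split only if the number of new PGs stays within
// max_split_count per participating OSD.
bool split_allowed_sketch(int new_pg_num, int old_pg_num,
                          int num_osds, int max_split_count) {
  int expected_osds = std::min(old_pg_num, num_osds);
  long new_pgs = static_cast<long>(new_pg_num) - old_pg_num;
  return new_pgs <= static_cast<long>(max_split_count) * expected_osds;
}

int main() {
  std::cout << std::boolalpha
            << split_allowed_sketch(256, 128, 10, 32) << "\n"   // true
            << split_allowed_sketch(8192, 128, 3, 32) << "\n";  // false
}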
p.set_pg_num(n);
return err;
if (val == "bloom") {
BloomHitSet::Params *bsp = new BloomHitSet::Params;
- bsp->set_fpp(g_conf->get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
+ bsp->set_fpp(g_conf().get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
p.hit_set_params = HitSet::Params(bsp);
} else if (val == "explicit_hash")
p.hit_set_params = HitSet::Params(new ExplicitHashHitSet::Params);
return -EINVAL;
}
stringstream err;
- if (!g_conf->mon_debug_no_require_bluestore_for_ec_overwrites &&
+ if (!g_conf()->mon_debug_no_require_bluestore_for_ec_overwrites &&
!is_pool_currently_all_bluestore(pool, p, &err)) {
ss << "pool must only be stored on bluestore for scrubbing to work: " << err.str();
return -EINVAL;
goto reply;
}
- if (g_conf->mon_osd_crush_smoke_test) {
+ if (g_conf()->mon_osd_crush_smoke_test) {
// sanity check: test some inputs to make sure this map isn't
// totally broken
dout(10) << " testing map" << dendl;
tester.set_min_x(0);
tester.set_max_x(50);
auto start = ceph::coarse_mono_clock::now();
- int r = tester.test_with_fork(g_conf->mon_lease);
+ int r = tester.test_with_fork(g_conf()->mon_lease);
auto duration = ceph::coarse_mono_clock::now() - start;
if (r < 0) {
dout(10) << " tester.test_with_fork returns " << r
goto reply;
}
- if (newmax > g_conf->mon_max_osd) {
+ if (newmax > g_conf()->mon_max_osd) {
err = -ERANGE;
ss << "cannot set max_osd to " << newmax << " which is > conf.mon_max_osd ("
- << g_conf->mon_max_osd << ")";
+ << g_conf()->mon_max_osd << ")";
goto reply;
}
double d;
// default one hour
cmd_getval(cct, cmdmap, "expire", d,
- g_conf->mon_osd_blacklist_default_expire);
+ g_conf()->mon_osd_blacklist_default_expire);
expires += d;
pending_inc.new_blacklist[addr] = expires;
string pool_type_str;
cmd_getval(cct, cmdmap, "pool_type", pool_type_str);
if (pool_type_str.empty())
- pool_type_str = g_conf->get_val<string>("osd_pool_default_type");
+ pool_type_str = g_conf().get_val<string>("osd_pool_default_type");
string poolstr;
cmd_getval(cct, cmdmap, "pool", poolstr);
}
if ((!tp->removed_snaps.empty() || !tp->snaps.empty()) &&
((force_nonempty != "--force-nonempty") ||
- (!g_conf->mon_debug_unsafe_allow_tier_with_nonempty_snaps))) {
+ (!g_conf()->mon_debug_unsafe_allow_tier_with_nonempty_snaps))) {
ss << "tier pool '" << tierpoolstr << "' has snapshot state; it cannot be added as a tier without breaking the pool";
err = -ENOTEMPTY;
goto reply;
err = -ENOTEMPTY;
goto reply;
}
- auto& modestr = g_conf->get_val<string>("osd_tier_default_cache_mode");
+ auto& modestr = g_conf().get_val<string>("osd_tier_default_cache_mode");
pg_pool_t::cache_mode_t mode = pg_pool_t::get_cache_mode_from_str(modestr);
if (mode < 0) {
ss << "osd tier cache default mode '" << modestr << "' is not a valid cache mode";
}
HitSet::Params hsp;
auto& cache_hit_set_type =
- g_conf->get_val<string>("osd_tier_default_cache_hit_set_type");
+ g_conf().get_val<string>("osd_tier_default_cache_hit_set_type");
if (cache_hit_set_type == "bloom") {
BloomHitSet::Params *bsp = new BloomHitSet::Params;
- bsp->set_fpp(g_conf->get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
+ bsp->set_fpp(g_conf().get_val<double>("osd_pool_default_hit_set_bloom_fpp"));
hsp = HitSet::Params(bsp);
} else if (cache_hit_set_type == "explicit_hash") {
hsp = HitSet::Params(new ExplicitHashHitSet::Params);
ntp->set_last_force_op_resend(pending_inc.epoch);
ntp->tier_of = pool_id;
ntp->cache_mode = mode;
- ntp->hit_set_count = g_conf->get_val<uint64_t>("osd_tier_default_cache_hit_set_count");
- ntp->hit_set_period = g_conf->get_val<uint64_t>("osd_tier_default_cache_hit_set_period");
- ntp->min_read_recency_for_promote = g_conf->get_val<uint64_t>("osd_tier_default_cache_min_read_recency_for_promote");
- ntp->min_write_recency_for_promote = g_conf->get_val<uint64_t>("osd_tier_default_cache_min_write_recency_for_promote");
- ntp->hit_set_grade_decay_rate = g_conf->get_val<uint64_t>("osd_tier_default_cache_hit_set_grade_decay_rate");
- ntp->hit_set_search_last_n = g_conf->get_val<uint64_t>("osd_tier_default_cache_hit_set_search_last_n");
+ ntp->hit_set_count = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_count");
+ ntp->hit_set_period = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_period");
+ ntp->min_read_recency_for_promote = g_conf().get_val<uint64_t>("osd_tier_default_cache_min_read_recency_for_promote");
+ ntp->min_write_recency_for_promote = g_conf().get_val<uint64_t>("osd_tier_default_cache_min_write_recency_for_promote");
+ ntp->hit_set_grade_decay_rate = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_grade_decay_rate");
+ ntp->hit_set_search_last_n = g_conf().get_val<uint64_t>("osd_tier_default_cache_hit_set_search_last_n");
ntp->hit_set_params = hsp;
ntp->target_max_bytes = size;
ss << "pool '" << tierpoolstr << "' is now (or already was) a cache tier of '" << poolstr << "'";
return -EBUSY;
}
- if (!g_conf->mon_allow_pool_delete) {
+ if (!g_conf()->mon_allow_pool_delete) {
*ss << "pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool";
return -EPERM;
}
return 0;
}
- if (g_conf->mon_fake_pool_delete && !no_fake) {
+ if (g_conf()->mon_fake_pool_delete && !no_fake) {
string old_name = osdmap.get_pool_name(pool);
string new_name = old_name + "." + stringify(pool) + ".DELETED";
dout(1) << __func__ << " faking pool deletion: renaming " << pool << " "
stamp_delta += delta_t;
pg_sum_delta.stats.add(d.stats);
auto smooth_intervals =
- cct ? cct->_conf->get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
+ cct ? cct->_conf.get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
if (pg_sum_deltas.size() > smooth_intervals) {
pg_sum_delta.stats.sub(pg_sum_deltas.front().first.stats);
stamp_delta -= pg_sum_deltas.front().second;
*result_ts_delta += delta_t;
result_pool_delta->stats.add(d.stats);
}
- size_t s = cct ? cct->_conf->get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
+ size_t s = cct ? cct->_conf.get_val<uint64_t>("mon_stat_smooth_intervals") : 1;
if (delta_avg_list->size() > s) {
result_pool_delta->stats.sub(delta_avg_list->front().first.stats);
*result_ts_delta -= delta_avg_list->front().second;
health_check_map_t *checks) const
{
utime_t now = ceph_clock_now();
- const auto max = cct->_conf->get_val<uint64_t>("mon_health_max_detail");
+ const auto max = cct->_conf.get_val<uint64_t>("mon_health_max_detail");
const auto& pools = osdmap.get_pools();
typedef enum pg_consequence_t {
}
}
- utime_t cutoff = now - utime_t(cct->_conf->get_val<int64_t>("mon_pg_stuck_threshold"), 0);
+ utime_t cutoff = now - utime_t(cct->_conf.get_val<int64_t>("mon_pg_stuck_threshold"), 0);
// Loop over all PGs, if there are any possibly-unhealthy states in there
if (!possible_responses.empty()) {
for (const auto& i : pg_stat) {
unsigned num_in = osdmap.get_num_in_osds();
auto sum_pg_up = std::max(static_cast<size_t>(pg_sum.up), pg_stat.size());
const auto min_pg_per_osd =
- cct->_conf->get_val<uint64_t>("mon_pg_warn_min_per_osd");
+ cct->_conf.get_val<uint64_t>("mon_pg_warn_min_per_osd");
if (num_in && min_pg_per_osd > 0 && osdmap.get_pools().size() > 0) {
auto per = sum_pg_up / num_in;
if (per < min_pg_per_osd && per) {
}
// TOO_MANY_PGS
- auto max_pg_per_osd = cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd");
+ auto max_pg_per_osd = cct->_conf.get_val<uint64_t>("mon_max_pg_per_osd");
if (num_in && max_pg_per_osd > 0) {
auto per = sum_pg_up / num_in;
if (per > max_pg_per_osd) {
if (!pg_stat.empty()) {
list<string> pgp_detail, many_detail;
const auto mon_pg_warn_min_objects =
- cct->_conf->get_val<int64_t>("mon_pg_warn_min_objects");
+ cct->_conf.get_val<int64_t>("mon_pg_warn_min_objects");
const auto mon_pg_warn_min_pool_objects =
- cct->_conf->get_val<int64_t>("mon_pg_warn_min_pool_objects");
+ cct->_conf.get_val<int64_t>("mon_pg_warn_min_pool_objects");
const auto mon_pg_warn_max_object_skew =
- cct->_conf->get_val<double>("mon_pg_warn_max_object_skew");
+ cct->_conf.get_val<double>("mon_pg_warn_max_object_skew");
for (auto p = pg_pool_sum.begin();
p != pg_pool_sum.end();
++p) {
// POOL_FULL
// POOL_NEAR_FULL
{
- float warn_threshold = (float)g_conf->get_val<int64_t>("mon_pool_quota_warn_threshold")/100;
- float crit_threshold = (float)g_conf->get_val<int64_t>("mon_pool_quota_crit_threshold")/100;
+ float warn_threshold = (float)g_conf().get_val<int64_t>("mon_pool_quota_warn_threshold")/100;
+ float crit_threshold = (float)g_conf().get_val<int64_t>("mon_pool_quota_crit_threshold")/100;
list<string> full_detail, nearfull_detail;
unsigned full_pools = 0, nearfull_pools = 0;
for (auto it : pools) {
}
// POOL_APP
- if (g_conf->get_val<bool>("mon_warn_on_pool_no_app")) {
+ if (g_conf().get_val<bool>("mon_warn_on_pool_no_app")) {
list<string> detail;
for (auto &it : pools) {
const pg_pool_t &pool = it.second;
stuckop_vec.push_back("unclean");
int64_t threshold;
cmd_getval(g_ceph_context, cmdmap, "threshold", threshold,
- g_conf->get_val<int64_t>("mon_pg_stuck_threshold"));
+ g_conf().get_val<int64_t>("mon_pg_stuck_threshold"));
if (pg_map.dump_stuck_pg_stats(ds, f, (int)threshold, stuckop_vec) < 0) {
*ss << "failed";
// if a large number of osds changed state, just iterate over the whole
// pg map.
if (need_check_down_pg_osds.size() > (unsigned)osdmap.get_num_osds() *
- g_conf->get_val<double>("mon_pg_check_down_all_threshold")) {
+ g_conf().get_val<double>("mon_pg_check_down_all_threshold")) {
check_all = true;
}
}
}
- if (!num_osds || (num_pg_copies / num_osds < g_conf->mon_reweight_min_pgs_per_osd)) {
+ if (!num_osds || (num_pg_copies / num_osds < g_conf()->mon_reweight_min_pgs_per_osd)) {
*ss << "Refusing to reweight: we only have " << num_pg_copies
<< " PGs across " << num_osds << " osds!\n";
return -EDOM;
// by osd utilization
int num_osd = std::max<size_t>(1, pgm.osd_stat.size());
if ((uint64_t)pgm.osd_sum.kb * 1024 / num_osd
- < g_conf->mon_reweight_min_bytes_per_osd) {
+ < g_conf()->mon_reweight_min_bytes_per_osd) {
*ss << "Refusing to reweight: we only have " << pgm.osd_sum.kb
<< " kb across all osds!\n";
return -EDOM;
}
if ((uint64_t)pgm.osd_sum.kb_used * 1024 / num_osd
- < g_conf->mon_reweight_min_bytes_per_osd) {
+ < g_conf()->mon_reweight_min_bytes_per_osd) {
*ss << "Refusing to reweight: we only have " << pgm.osd_sum.kb_used
<< " kb used across all osds!\n";
return -EDOM;
// set timeout event
collect_timeout_event = mon->timer.add_event_after(
- g_conf->mon_accept_timeout_factor *
- g_conf->mon_lease,
+ g_conf()->mon_accept_timeout_factor *
+ g_conf()->mon_lease,
new C_MonContext(mon, [this](int r) {
if (r == -ECANCELED)
return;
return;
}
- assert(g_conf->paxos_kill_at != 1);
+ assert(g_conf()->paxos_kill_at != 1);
// store any committed values if any are specified in the message
need_refresh = store_state(last);
- assert(g_conf->paxos_kill_at != 2);
+ assert(g_conf()->paxos_kill_at != 2);
// is everyone contiguous and up to date?
for (map<int,version_t>::iterator p = peer_last_committed.begin();
logger->tinc(l_paxos_begin_latency, to_timespan(end - start));
- assert(g_conf->paxos_kill_at != 3);
+ assert(g_conf()->paxos_kill_at != 3);
if (mon->get_quorum().size() == 1) {
// we're alone, take it easy
// set timeout event
accept_timeout_event = mon->timer.add_event_after(
- g_conf->mon_accept_timeout_factor * g_conf->mon_lease,
+ g_conf()->mon_accept_timeout_factor * g_conf()->mon_lease,
new C_MonContext(mon, [this](int r) {
if (r == -ECANCELED)
return;
assert(begin->pn == accepted_pn);
assert(begin->last_committed == last_committed);
- assert(g_conf->paxos_kill_at != 4);
+ assert(g_conf()->paxos_kill_at != 4);
logger->inc(l_paxos_begin);
logger->tinc(l_paxos_begin_latency, to_timespan(end - start));
- assert(g_conf->paxos_kill_at != 5);
+ assert(g_conf()->paxos_kill_at != 5);
// reply
MMonPaxos *accept = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_ACCEPT,
accepted.insert(from);
dout(10) << " now " << accepted << " have accepted" << dendl;
- assert(g_conf->paxos_kill_at != 6);
+ assert(g_conf()->paxos_kill_at != 6);
// only commit (and expose committed state) when we get *all* quorum
// members to accept. otherwise, they may still be sharing the now
{
dout(10) << __func__ << " " << (last_committed+1) << dendl;
- assert(g_conf->paxos_kill_at != 7);
+ assert(g_conf()->paxos_kill_at != 7);
auto t(std::make_shared<MonitorDBStore::Transaction>());
utime_t end = ceph_clock_now();
logger->tinc(l_paxos_commit_latency, end - commit_start_stamp);
- assert(g_conf->paxos_kill_at != 8);
+ assert(g_conf()->paxos_kill_at != 8);
// cancel lease - it was for the old value.
// (this would only happen if message layer lost the 'begin', but
mon->send_mon_message(commit, *p);
}
- assert(g_conf->paxos_kill_at != 9);
+ assert(g_conf()->paxos_kill_at != 9);
// get ready for a new round.
new_value.clear();
extend_lease();
}
- assert(g_conf->paxos_kill_at != 10);
+ assert(g_conf()->paxos_kill_at != 10);
finish_round();
}
//assert(is_active());
lease_expire = ceph_clock_now();
- lease_expire += g_conf->mon_lease;
+ lease_expire += g_conf()->mon_lease;
acked_lease.clear();
acked_lease.insert(mon->rank);
- dout(7) << "extend_lease now+" << g_conf->mon_lease
+ dout(7) << "extend_lease now+" << g_conf()->mon_lease
<< " (" << lease_expire << ")" << dendl;
// bcast
// if old timeout is still in place, leave it.
if (!lease_ack_timeout_event) {
lease_ack_timeout_event = mon->timer.add_event_after(
- g_conf->mon_lease_ack_timeout_factor * g_conf->mon_lease,
+ g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease,
new C_MonContext(mon, [this](int r) {
if (r == -ECANCELED)
return;
// set renew event
utime_t at = lease_expire;
- at -= g_conf->mon_lease;
- at += g_conf->mon_lease_renew_interval_factor * g_conf->mon_lease;
+ at -= g_conf()->mon_lease;
+ at += g_conf()->mon_lease_renew_interval_factor * g_conf()->mon_lease;
lease_renew_event = mon->timer.add_event_at(
at, new C_MonContext(mon, [this](int r) {
if (r == -ECANCELED)
utime_t now = ceph_clock_now();
if (t > now) {
utime_t diff = t - now;
- if (diff > g_conf->mon_clock_drift_allowed) {
+ if (diff > g_conf()->mon_clock_drift_allowed) {
utime_t warn_diff = now - last_clock_drift_warn;
if (warn_diff >
- pow(g_conf->mon_clock_drift_warn_backoff, clock_drift_warned)) {
+ pow(g_conf()->mon_clock_drift_warn_backoff, clock_drift_warned)) {
mon->clog->warn() << "message from " << from << " was stamped " << diff
<< "s in the future, clocks not synchronized";
last_clock_drift_warn = ceph_clock_now();
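The warning above is rate-limited with an exponential backoff: the n-th repeat is only emitted once mon_clock_drift_warn_backoff^n seconds have elapsed since the previous one. A tiny numeric illustration (the backoff value is a placeholder):

#include <cmath>
#include <iostream>

// Illustrative: suppress the next clock-drift warning until backoff^warned
// seconds have passed since the last one.
bool should_warn_sketch(double secs_since_last_warn, double backoff, int warned) {
  return secs_since_last_warn > std::pow(backoff, warned);
}

int main() {
  std::cout << std::boolalpha
            << should_warn_sketch(30.0, 5.0, 1) << "\n"   // true:  30 > 5
            << should_warn_sketch(30.0, 5.0, 3) << "\n";  // false: 30 < 125
}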
if (lease_timeout_event)
mon->timer.cancel_event(lease_timeout_event);
lease_timeout_event = mon->timer.add_event_after(
- g_conf->mon_lease_ack_timeout_factor * g_conf->mon_lease,
+ g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease,
new C_MonContext(mon, [this](int r) {
if (r == -ECANCELED)
return;
void Paxos::trim()
{
assert(should_trim());
- version_t end = std::min(get_version() - g_conf->paxos_min,
- get_first_committed() + g_conf->paxos_trim_max);
+ version_t end = std::min(get_version() - g_conf()->paxos_min,
+ get_first_committed() + g_conf()->paxos_trim_max);
if (first_committed >= end)
return;
t->erase(get_name(), v);
}
t->put(get_name(), "first_committed", end);
- if (g_conf->mon_compact_on_trim) {
+ if (g_conf()->mon_compact_on_trim) {
dout(10) << " compacting trimmed range" << dendl;
t->compact_range(get_name(), stringify(first_committed - 1), stringify(end));
}
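Paxos::trim above keeps at least paxos_min recent versions and removes at most paxos_trim_max per pass; the end-of-trim computation in isolation (numbers are arbitrary):

#include <algorithm>
#include <cstdint>
#include <iostream>

using version_t = uint64_t;

// Illustrative: latest version that may be trimmed, preserving 'paxos_min'
// recent versions and trimming at most 'paxos_trim_max' in one pass.
version_t trim_end_sketch(version_t first_committed, version_t last_committed,
                          version_t paxos_min, version_t paxos_trim_max) {
  return std::min(last_committed - paxos_min,
                  first_committed + paxos_trim_max);
}

int main() {
  std::cout << trim_end_sketch(100, 600, 250, 100) << "\n";  // 200
}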
*/
bool should_trim() {
int available_versions = get_version() - get_first_committed();
- int maximum_versions = g_conf->paxos_min + g_conf->paxos_trim_min;
+ int maximum_versions = g_conf()->paxos_min + g_conf()->paxos_trim_min;
if (trimming || (available_versions <= maximum_versions))
return false;
delay = 0.0;
} else {
utime_t now = ceph_clock_now();
- if ((now - paxos->last_commit_time) > g_conf->paxos_propose_interval)
- delay = (double)g_conf->paxos_min_wait;
+ if ((now - paxos->last_commit_time) > g_conf()->paxos_propose_interval)
+ delay = (double)g_conf()->paxos_min_wait;
else
- delay = (double)(g_conf->paxos_propose_interval + paxos->last_commit_time
+ delay = (double)(g_conf()->paxos_propose_interval + paxos->last_commit_time
- now);
}
return true;
*/
return (!latest_full ||
(latest_full <= get_trim_to()) ||
- (get_last_committed() - latest_full > (version_t)g_conf->paxos_stash_full_interval));
+ (get_last_committed() - latest_full > (version_t)g_conf()->paxos_stash_full_interval));
}
void PaxosService::restart()
return;
version_t to_remove = trim_to - get_first_committed();
- if (g_conf->paxos_service_trim_min > 0 &&
- to_remove < (version_t)g_conf->paxos_service_trim_min) {
+ if (g_conf()->paxos_service_trim_min > 0 &&
+ to_remove < (version_t)g_conf()->paxos_service_trim_min) {
dout(10) << __func__ << " trim_to " << trim_to << " would only trim " << to_remove
- << " < paxos_service_trim_min " << g_conf->paxos_service_trim_min << dendl;
+ << " < paxos_service_trim_min " << g_conf()->paxos_service_trim_min << dendl;
return;
}
- if (g_conf->paxos_service_trim_max > 0 &&
- to_remove > (version_t)g_conf->paxos_service_trim_max) {
+ if (g_conf()->paxos_service_trim_max > 0 &&
+ to_remove > (version_t)g_conf()->paxos_service_trim_max) {
dout(10) << __func__ << " trim_to " << trim_to << " would only trim " << to_remove
- << " > paxos_service_trim_max, limiting to " << g_conf->paxos_service_trim_max
+ << " > paxos_service_trim_max, limiting to " << g_conf()->paxos_service_trim_max
<< dendl;
- trim_to = get_first_committed() + g_conf->paxos_service_trim_max;
- to_remove = g_conf->paxos_service_trim_max;
+ trim_to = get_first_committed() + g_conf()->paxos_service_trim_max;
+ to_remove = g_conf()->paxos_service_trim_max;
}
dout(10) << __func__ << " trimming to " << trim_to << ", " << to_remove << " states" << dendl;
t->erase(get_service_name(), full_key);
}
}
- if (g_conf->mon_compact_on_trim) {
+ if (g_conf()->mon_compact_on_trim) {
dout(20) << " compacting prefix " << get_service_name() << dendl;
t->compact_range(get_service_name(), stringify(from - 1), stringify(to));
t->compact_range(get_service_name(),
epoch_t epoch;
QuorumService(Monitor *m) :
- tick_period(g_conf->mon_tick_interval),
+ tick_period(g_conf()->mon_tick_interval),
mon(m),
epoch(0)
{
Messenger *Messenger::create_client_messenger(CephContext *cct, string lname)
{
- std::string public_msgr_type = cct->_conf->ms_public_type.empty() ? cct->_conf->get_val<std::string>("ms_type") : cct->_conf->ms_public_type;
+ std::string public_msgr_type = cct->_conf->ms_public_type.empty() ? cct->_conf.get_val<std::string>("ms_type") : cct->_conf->ms_public_type;
auto nonce = ceph::util::generate_random_number<uint64_t>();
return Messenger::create(cct, public_msgr_type, entity_name_t::CLIENT(),
std::move(lname), nonce, 0);
return nullptr;
}
+/**
+ * Get the default crc flags for this messenger.
+ */
+static int get_default_crc_flags(const ConfigProxy&);
+
+Messenger::Messenger(CephContext *cct_, entity_name_t w)
+ : trace_endpoint("0.0.0.0", 0, "Messenger"),
+ my_name(w),
+ default_send_priority(CEPH_MSG_PRIO_DEFAULT),
+ started(false),
+ magic(0),
+ socket_priority(-1),
+ cct(cct_),
+ crcflags(get_default_crc_flags(cct->_conf)) {}
+
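Moving the Messenger constructor out of the header lets it use the file-local get_default_crc_flags helper above rather than a static member function. A minimal, unrelated-to-Ceph example of the same pattern:

#include <iostream>

// Illustrative: a file-local helper with internal linkage, visible only in
// this translation unit.
static int default_flags() { return 0x3; }

class widget {
  int flags;
public:
  widget();                       // declared here, defined out of line below
  int get_flags() const { return flags; }
};

// The out-of-line definition can call the static helper; an inline,
// header-only constructor could not see it.
widget::widget() : flags(default_flags()) {}

int main() { std::cout << widget().get_flags() << "\n"; }  // 3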
void Messenger::set_endpoint_addr(const entity_addr_t& a,
const entity_name_t &name)
{
trace_endpoint.set_port(a.get_port());
}
-/*
+/**
+ * Get the default crc flags for this messenger.
+ *
* Pre-calculate desired software CRC settings. CRC computation may
* be disabled by default for some transports (e.g., those with strong
* hardware checksum support).
*/
-int Messenger::get_default_crc_flags(md_config_t * conf)
+int get_default_crc_flags(const ConfigProxy& conf)
{
int r = 0;
if (conf->ms_crc_data)
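The remainder of get_default_crc_flags is not shown in this hunk; a self-contained sketch of the flag-building shape it follows (the constant names and the header-crc branch are assumptions, not copied from this change):

#include <iostream>

// Illustrative stand-ins for the crc flag bits and config fields.
constexpr int CRC_DATA   = 1 << 0;
constexpr int CRC_HEADER = 1 << 1;

struct conf_sketch { bool ms_crc_data = true; bool ms_crc_header = true; };

int default_crc_flags_sketch(const conf_sketch& conf) {
  int r = 0;
  if (conf.ms_crc_data)   r |= CRC_DATA;
  if (conf.ms_crc_header) r |= CRC_HEADER;
  return r;
}

int main() { std::cout << default_crc_flags_sketch({true, false}) << "\n"; }  // 1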
* Messenger users should construct full implementations directly,
* or use the create() function.
*/
- Messenger(CephContext *cct_, entity_name_t w)
- : trace_endpoint("0.0.0.0", 0, "Messenger"),
- my_name(w),
- default_send_priority(CEPH_MSG_PRIO_DEFAULT),
- started(false),
- magic(0),
- socket_priority(-1),
- cct(cct_),
- crcflags(get_default_crc_flags(cct->_conf)) {}
+ Messenger(CephContext *cct_, entity_name_t w);
virtual ~Messenger() {}
/**
* (0 if the queue is empty)
*/
virtual double get_dispatch_queue_max_age(utime_t now) = 0;
- /**
- * Get the default crc flags for this messenger.
- * but not yet dispatched.
- */
- static int get_default_crc_flags(md_config_t *);
/**
* @} // Accessors
/// the default Policy we use for Pipes
policy_t default_policy;
/// map specifying different Policies for specific peer types
- map<int, policy_t> policy_map; // entity_name_t::type -> Policy
+ std::map<int, policy_t> policy_map; // entity_name_t::type -> Policy
public:
const policy_t& get(peer_type_t peer_type) const {
void AsyncConnection::maybe_start_delay_thread()
{
if (!delay_state) {
- async_msgr->cct->_conf->with_val<std::string>(
+ async_msgr->cct->_conf.with_val<std::string>(
"ms_inject_delay_type",
[this](const string& s) {
if (s.find(ceph_entity_type_name(peer_type)) != string::npos) {
const set<int>& avoid_ports,
entity_addrvec_t* bound_addrs)
{
- const md_config_t *conf = msgr->cct->_conf;
+ const auto& conf = msgr->cct->_conf;
// bind to socket(s)
ldout(msgr->cct, 10) << __func__ << " " << bind_addrs << dendl;
int Accepter::bind(const entity_addr_t &bind_addr, const set<int>& avoid_ports)
{
- const md_config_t *conf = msgr->cct->_conf;
+ const auto& conf = msgr->cct->_conf;
// bind to a socket
ldout(msgr->cct,10) << __func__ << dendl;
void Pipe::maybe_start_delay_thread()
{
if (!delay_thread) {
- auto pos = msgr->cct->_conf->get_val<std::string>("ms_inject_delay_type").find(ceph_entity_type_name(connection_state->peer_type));
+ auto pos = msgr->cct->_conf.get_val<std::string>("ms_inject_delay_type").find(ceph_entity_type_name(connection_state->peer_type));
if (pos != string::npos) {
lsubdout(msgr->cct, ms, 1) << "setting up a delay queue on Pipe " << this << dendl;
delay_thread = new DelayedDelivery(this);
entity_addr_t peer_addr_for_me, socket_addr;
AuthAuthorizer *authorizer = NULL;
bufferlist addrbl, myaddrbl;
- const md_config_t *conf = msgr->cct->_conf;
+ const auto& conf = msgr->cct->_conf;
// close old socket. this is safe because we stopped the reader thread above.
if (sd >= 0)
void Pipe::fault(bool onread)
{
- const md_config_t *conf = msgr->cct->_conf;
+ const auto& conf = msgr->cct->_conf;
assert(pipe_lock.is_locked());
cond.Signal();
"-d", // debug
};
int c = 3;
- auto fuse_debug = store->cct->_conf->get_val<bool>("fuse_debug");
+ auto fuse_debug = store->cct->_conf.get_val<bool>("fuse_debug");
if (fuse_debug)
++c;
return fuse_main(c, (char**)v, &fs_oper, (void*)this);
"-d", // debug
};
int c = 3;
- auto fuse_debug = store->cct->_conf->get_val<bool>("fuse_debug");
+ auto fuse_debug = store->cct->_conf.get_val<bool>("fuse_debug");
if (fuse_debug)
++c;
fuse_args a = FUSE_ARGS_INIT(c, (char**)v);
: file(f),
pos(0),
buffer_appender(buffer.get_page_aligned_appender(
- g_conf->bluefs_alloc_size / CEPH_PAGE_SIZE)) {
+ g_conf()->bluefs_alloc_size / CEPH_PAGE_SIZE)) {
++file->num_writers;
iocv.fill(nullptr);
dirty_devs.fill(false);
<< " expected_allocations=" << bi.expected_allocations
<< dendl;
int64_t benefit = blob_expected_for_release - bi.expected_allocations;
- if (benefit >= g_conf->bluestore_gc_enable_blob_threshold) {
+ if (benefit >= g_conf()->bluestore_gc_enable_blob_threshold) {
if (bi.collect_candidate) {
auto it = bi.first_lextent;
bool bExit = false;
assert(p != onode_lru.begin());
--p;
int skipped = 0;
- int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
+ int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned;
while (num > 0) {
Onode *o = &*p;
int refs = o->nref.load();
assert(p != onode_lru.begin());
--p;
int skipped = 0;
- int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
+ int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned;
while (num > 0) {
Onode *o = &*p;
dout(20) << __func__ << " considering " << o << dendl;
}
// avoid resharding the trailing shard, even if it is small
else if (n != shards.end() &&
- len < g_conf->bluestore_extent_map_shard_min_size) {
+ len < g_conf()->bluestore_extent_map_shard_min_size) {
assert(endoff != OBJECT_MAX_SIZE);
if (p == shards.begin()) {
// we are the first shard, combine with next shard
mempool_thread(this)
{
_init_logger();
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
set_cache_shards(1);
}
mempool_thread(this)
{
_init_logger();
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
set_cache_shards(1);
}
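Registering the store as a config observer in its constructors and removing it in the destructor keeps reconfiguration callbacks from outliving the object. A self-contained sketch of that pairing (toy observer and config types, not Ceph's):

#include <iostream>
#include <set>
#include <string>

// Illustrative observer wiring: register on construction, unregister on
// destruction, notify on apply_changes().
struct observer {
  virtual void handle_conf_change(const std::set<std::string>& changed) = 0;
  virtual ~observer() = default;
};

class config {
  std::set<observer*> observers;
public:
  void add_observer(observer* o) { observers.insert(o); }
  void remove_observer(observer* o) { observers.erase(o); }
  void apply_changes(const std::set<std::string>& changed) {
    for (auto* o : observers) o->handle_conf_change(changed);
  }
};

class store : public observer {
  config& conf;
public:
  explicit store(config& c) : conf(c) { conf.add_observer(this); }
  ~store() override { conf.remove_observer(this); }
  void handle_conf_change(const std::set<std::string>& changed) override {
    if (changed.count("bluestore_throttle_bytes"))
      std::cout << "re-tune throttles\n";
  }
};

int main() {
  config conf;
  store s(conf);
  conf.apply_changes({"bluestore_throttle_bytes"});
}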
}
finishers.clear();
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
_shutdown_logger();
assert(!mounted);
assert(db == NULL);
return KEYS;
}
-void BlueStore::handle_conf_change(const md_config_t *conf,
+void BlueStore::handle_conf_change(const md_config_t *mconf,
const std::set<std::string> &changed)
{
if (changed.count("bluestore_csum_type")) {
_set_throttle_params();
}
}
+ ConfigReader conf{mconf};
if (changed.count("bluestore_throttle_bytes")) {
throttle_bytes.reset_max(conf->bluestore_throttle_bytes);
throttle_deferred_bytes.reset_max(
int BlueStore::_set_cache_sizes()
{
assert(bdev);
- cache_autotune = cct->_conf->get_val<bool>("bluestore_cache_autotune");
+ cache_autotune = cct->_conf.get_val<bool>("bluestore_cache_autotune");
cache_autotune_chunk_size =
- cct->_conf->get_val<uint64_t>("bluestore_cache_autotune_chunk_size");
+ cct->_conf.get_val<uint64_t>("bluestore_cache_autotune_chunk_size");
cache_autotune_interval =
- cct->_conf->get_val<double>("bluestore_cache_autotune_interval");
+ cct->_conf.get_val<double>("bluestore_cache_autotune_interval");
if (cct->_conf->bluestore_cache_size) {
cache_size = cct->_conf->bluestore_cache_size;
{
// sanity check(s)
auto osd_max_object_size =
- cct->_conf->get_val<uint64_t>("osd_max_object_size");
+ cct->_conf.get_val<uint64_t>("osd_max_object_size");
if (osd_max_object_size >= (uint64_t)OBJECT_MAX_SIZE) {
derr << __func__ << " osd_max_object_size >= 0x" << std::hex << OBJECT_MAX_SIZE
<< "; BlueStore has hard limit of 0x" << OBJECT_MAX_SIZE << "." << std::dec << dendl;
dout(1) << __func__ << " main device size " << byte_u_t(dev_size)
<< " is too small, disable bluestore_bluefs_min for now"
<< dendl;
- int r = cct->_conf->set_val("bluestore_bluefs_min", "0");
+ int r = cct->_conf.set_val("bluestore_bluefs_min", "0");
assert(r == 0);
}
return 0;
options = cct->_conf->bluestore_rocksdb_options;
map<string,string> cf_map;
- cct->_conf->with_val<string>("bluestore_rocksdb_cfs",
+ cct->_conf.with_val<string>("bluestore_rocksdb_cfs",
get_str_map,
&cf_map,
" \t");
if (to_repair_db)
return 0;
if (create) {
- if (cct->_conf->get_val<bool>("bluestore_rocksdb_cf")) {
+ if (cct->_conf.get_val<bool>("bluestore_rocksdb_cf")) {
r = db->create_and_open(err, cfs);
} else {
r = db->create_and_open(err);
gift = g;
reclaim = 0;
}
- uint64_t min_free = cct->_conf->get_val<uint64_t>("bluestore_bluefs_min_free");
+ uint64_t min_free = cct->_conf.get_val<uint64_t>("bluestore_bluefs_min_free");
if (bluefs_free < min_free &&
min_free < free_cap) {
uint64_t g = min_free - bluefs_free;
spg_t pgid;
mempool::bluestore_fsck::list<string> expecting_shards;
for (it->lower_bound(string()); it->valid(); it->next()) {
- if (g_conf->bluestore_debug_fsck_abort) {
+ if (g_conf()->bluestore_debug_fsck_abort) {
goto out_scan;
}
dout(30) << __func__ << " key "
notify = true;
}
if (txc->state == TransContext::STATE_DEFERRED_QUEUED &&
- osr->q.size() > g_conf->bluestore_max_deferred_txc) {
+ osr->q.size() > g_conf()->bluestore_max_deferred_txc) {
submit_deferred = true;
}
break;
dout(20) << __func__ << " write 0x" << std::hex
<< start << "~" << bl.length()
<< " crc " << bl.crc32c(-1) << std::dec << dendl;
- if (!g_conf->bluestore_debug_omit_block_device_write) {
+ if (!g_conf()->bluestore_debug_omit_block_device_write) {
logger->inc(l_bluestore_deferred_write_ops);
logger->inc(l_bluestore_deferred_write_bytes, bl.length());
int r = bdev->aio_write(start, bl, &b->ioc, false);
_buffer_cache_write(txc, b, b_off, bl,
wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
- if (!g_conf->bluestore_debug_omit_block_device_write) {
+ if (!g_conf()->bluestore_debug_omit_block_device_write) {
if (b_len <= prefer_deferred_size) {
dout(20) << __func__ << " deferring small 0x" << std::hex
<< b_len << std::dec << " unused write via deferred" << dendl;
wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
// queue io
- if (!g_conf->bluestore_debug_omit_block_device_write) {
+ if (!g_conf()->bluestore_debug_omit_block_device_write) {
if (l->length() <= prefer_deferred_size.load()) {
dout(20) << __func__ << " deferring small 0x" << std::hex
<< l->length() << std::dec << " write via deferred" << dendl;
o->onode.size = end;
}
- if (benefit >= g_conf->bluestore_gc_enable_total_threshold) {
+ if (benefit >= g_conf()->bluestore_gc_enable_total_threshold) {
if (!gc.get_extents_to_collect().empty()) {
dout(20) << __func__ << " perform garbage collection, "
<< "expected benefit = " << benefit << " AUs" << dendl;
int r = 0;
uint64_t lba_off, lba_count;
- uint32_t max_io_completion = (uint32_t)g_conf->get_val<uint64_t>("bluestore_spdk_max_io_completion");
- uint64_t io_sleep_in_us = g_conf->get_val<uint64_t>("bluestore_spdk_io_sleep");
+ uint32_t max_io_completion = (uint32_t)g_conf().get_val<uint64_t>("bluestore_spdk_max_io_completion");
+ uint64_t io_sleep_in_us = g_conf().get_val<uint64_t>("bluestore_spdk_io_sleep");
ceph::coarse_real_clock::time_point cur, start
= ceph::coarse_real_clock::now();
}
}
- auto coremask_arg = g_conf->get_val<std::string>("bluestore_spdk_coremask");
+ auto coremask_arg = g_conf().get_val<std::string>("bluestore_spdk_coremask");
int m_core_arg = -1;
try {
auto core_value = stoull(coremask_arg, nullptr, 16);
}
m_core_arg -= 1;
- uint32_t mem_size_arg = (uint32_t)g_conf->get_val<uint64_t>("bluestore_spdk_mem");
+ uint32_t mem_size_arg = (uint32_t)g_conf().get_val<uint64_t>("bluestore_spdk_mem");
if (!init) {
explicit FDCache(CephContext *cct) : cct(cct),
registry_shards(std::max<int64_t>(cct->_conf->filestore_fd_cache_shards, 1)) {
assert(cct);
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
registry = new SharedLRU<ghobject_t, FD>[registry_shards];
for (int i = 0; i < registry_shards; ++i) {
registry[i].set_cct(cct);
}
}
~FDCache() override {
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
delete[] registry;
}
typedef std::shared_ptr<FD> FDRef;
};
return KEYS;
}
- void handle_conf_change(const md_config_t *conf,
+ void handle_conf_change(const md_config_t *mconf,
const std::set<std::string> &changed) override {
+ ConfigReader conf{mconf};
if (changed.count("filestore_fd_cache_size")) {
for (int i = 0; i < registry_shards; ++i)
registry[i].set_size(
}
#endif
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
}
~FileJournal() override {
assert(fd == -1);
delete[] zero_buf;
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
}
int check() override;
logger = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
superblock.compat_features = get_fs_initial_compat_set();
}
delete *it;
*it = nullptr;
}
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
cct->get_perfcounters_collection()->remove(logger);
if (journal)
int orig = cct->_conf->filestore_inject_stall;
dout(5) << __FUNC__ << ": filestore_inject_stall " << orig << ", sleeping" << dendl;
sleep(orig);
- cct->_conf->set_val("filestore_inject_stall", "0");
+ cct->_conf.set_val("filestore_inject_stall", "0");
dout(5) << __FUNC__ << ": done stalling" << dendl;
}
}
}
skip:
- if (g_conf->subsys.should_gather<ceph_subsys_filestore, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_filestore, 20>()) {
for (auto& p : aset) {
dout(20) << __FUNC__ << ": set " << p.first << dendl;
}
return KEYS;
}
-void FileStore::handle_conf_change(const md_config_t *conf,
+void FileStore::handle_conf_change(const md_config_t *mconf,
const std::set <std::string> &changed)
{
if (changed.count("filestore_max_inline_xattr_size") ||
set_throttle_params();
}
+ ConfigReader conf{mconf};
if (changed.count("filestore_min_sync_interval") ||
changed.count("filestore_max_sync_interval") ||
changed.count("filestore_kill_at") ||
for (unsigned i = l_wbthrottle_first + 1; i != l_wbthrottle_last; ++i)
logger->set(i, 0);
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
}
WBThrottle::~WBThrottle() {
assert(cct);
cct->get_perfcounters_collection()->remove(logger);
delete logger;
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
}
void WBThrottle::start()
dout(10) << __func__ << " " << poid << " pos " << pos << dendl;
int r;
bool skip_data_digest = store->has_builtin_csum() &&
- g_conf->osd_skip_data_digest;
+ g_conf()->osd_skip_data_digest;
uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
recoverystate_perf(osd->recoverystate_perf),
monc(osd->monc),
class_handler(osd->class_handler),
- osd_max_object_size(*cct->_conf, "osd_max_object_size"),
- osd_skip_data_digest(*cct->_conf, "osd_skip_data_digest"),
+ osd_max_object_size(cct->_conf, "osd_max_object_size"),
+ osd_skip_data_digest(cct->_conf, "osd_skip_data_digest"),
publish_lock("OSDService::publish_lock"),
pre_publish_lock("OSDService::pre_publish_lock"),
max_oldest_map(0),
if (r < 0)
return r;
- string key = cct->_conf->get_val<string>("key");
+ string key = cct->_conf.get_val<string>("key");
if (key.size()) {
r = store->write_meta("osd_key", key);
if (r < 0)
return r;
} else {
- string keyfile = cct->_conf->get_val<string>("keyfile");
+ string keyfile = cct->_conf.get_val<string>("keyfile");
if (!keyfile.empty()) {
bufferlist keybl;
string err;
store_is_rotational(store->is_rotational()),
trace_endpoint("0.0.0.0", 0, "osd"),
asok_hook(NULL),
- m_osd_pg_epoch_max_lag_factor(cct->_conf->get_val<double>(
+ m_osd_pg_epoch_max_lag_factor(cct->_conf.get_val<double>(
"osd_pg_epoch_max_lag_factor")),
osd_compat(get_osd_compat_set()),
osd_op_tp(cct, "OSD::osd_op_tp", "tp_osd_tp",
return -EBUSY;
}
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
return 0;
}
if (!store_is_rotational && !journal_is_rotational)
return cct->_conf->osd_recovery_sleep_ssd;
else if (store_is_rotational && !journal_is_rotational)
- return cct->_conf->get_val<double>("osd_recovery_sleep_hybrid");
+ return cct->_conf.get_val<double>("osd_recovery_sleep_hybrid");
else
return cct->_conf->osd_recovery_sleep_hdd;
}
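The ladder above picks a recovery sleep based on whether the data store and the journal are rotational; the same selection written out standalone (sleep values are placeholders):

#include <iostream>

// Illustrative: choose a recovery sleep by device class of store and journal.
double recovery_sleep_sketch(bool store_rotational, bool journal_rotational,
                             double ssd, double hybrid, double hdd) {
  if (!store_rotational && !journal_rotational) return ssd;
  if (store_rotational && !journal_rotational)  return hybrid;
  return hdd;
}

int main() {
  std::cout << recovery_sleep_sketch(true, false, 0.0, 0.025, 0.1) << "\n";  // 0.025
}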
while (monc->wait_auth_rotating(30.0) < 0) {
derr << "unable to obtain rotating service keys; retrying" << dendl;
++rotating_auth_attempts;
- if (rotating_auth_attempts > g_conf->max_rotating_auth_attempts) {
+ if (rotating_auth_attempts > g_conf()->max_rotating_auth_attempts) {
derr << __func__ << " wait_auth_rotating timed out" << dendl;
exit(1);
}
set_state(STATE_STOPPING);
// Debugging
- if (cct->_conf->get_val<bool>("osd_debug_shutdown")) {
- cct->_conf->set_val("debug_osd", "100");
- cct->_conf->set_val("debug_journal", "100");
- cct->_conf->set_val("debug_filestore", "100");
- cct->_conf->set_val("debug_bluestore", "100");
- cct->_conf->set_val("debug_ms", "100");
- cct->_conf->apply_changes(NULL);
+ if (cct->_conf.get_val<bool>("osd_debug_shutdown")) {
+ cct->_conf.set_val("debug_osd", "100");
+ cct->_conf.set_val("debug_journal", "100");
+ cct->_conf.set_val("debug_filestore", "100");
+ cct->_conf.set_val("debug_bluestore", "100");
+ cct->_conf.set_val("debug_ms", "100");
+ cct->_conf.apply_changes(NULL);
}
// stop MgrClient earlier as it's more like an internal consumer of OSD
#ifdef PG_DEBUG_REFS
service.dump_live_pgids();
#endif
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
service.meta_ch.reset();
bool is_mon_create)
{
const auto max_pgs_per_osd =
- (cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
- cct->_conf->get_val<double>("osd_max_pg_per_osd_hard_ratio"));
+ (cct->_conf.get_val<uint64_t>("mon_max_pg_per_osd") *
+ cct->_conf.get_val<double>("osd_max_pg_per_osd_hard_ratio"));
if (num_pgs < max_pgs_per_osd) {
return false;
bool have_pending_creates = false;
{
const auto max_pgs_per_osd =
- (cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
- cct->_conf->get_val<double>("osd_max_pg_per_osd_hard_ratio"));
+ (cct->_conf.get_val<uint64_t>("mon_max_pg_per_osd") *
+ cct->_conf.get_val<double>("osd_max_pg_per_osd_hard_ratio"));
if (max_pgs_per_osd <= num_pgs) {
// this could happen if admin decreases this setting before a PG is removed
return;
cmd_getval(service->cct, cmdmap, "utime", delay, (int64_t)0);
ostringstream oss;
oss << delay;
- int r = service->cct->_conf->set_val("osd_recovery_delay_start",
+ int r = service->cct->_conf.set_val("osd_recovery_delay_start",
oss.str().c_str());
if (r != 0) {
ss << "set_recovery_delay: error setting "
<< r;
return;
}
- service->cct->_conf->apply_changes(NULL);
+ service->cct->_conf.apply_changes(NULL);
ss << "set_recovery_delay: set osd_recovery_delay_start "
<< "to " << service->cct->_conf->osd_recovery_delay_start;
return;
double pool_scrub_max_interval = 0;
p->opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &pool_scrub_max_interval);
double scrub_max_interval = pool_scrub_max_interval > 0 ?
- pool_scrub_max_interval : g_conf->osd_scrub_max_interval;
+ pool_scrub_max_interval : g_conf()->osd_scrub_max_interval;
// Instead of marking must_scrub force a schedule scrub
utime_t stamp = ceph_clock_now();
stamp -= scrub_max_interval;
for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
args += " " + *a;
osd_lock.Unlock();
- r = cct->_conf->injectargs(args, &ss);
+ r = cct->_conf.injectargs(args, &ss);
osd_lock.Lock();
}
else if (prefix == "config set") {
cmd_getval(cct, cmdmap, "key", key);
cmd_getval(cct, cmdmap, "value", val);
osd_lock.Unlock();
- r = cct->_conf->set_val(key, val, &ss);
+ r = cct->_conf.set_val(key, val, &ss);
if (r == 0) {
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
}
osd_lock.Lock();
}
cmd_getval(cct, cmdmap, "key", key);
osd_lock.Unlock();
std::string val;
- r = cct->_conf->get_val(key, &val);
+ r = cct->_conf.get_val(key, &val);
if (r == 0) {
ds << val;
}
std::string key;
cmd_getval(cct, cmdmap, "key", key);
osd_lock.Unlock();
- r = cct->_conf->rm_val(key);
+ r = cct->_conf.rm_val(key);
if (r == 0) {
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.apply_changes(nullptr);
}
if (r == -ENOENT) {
r = 0; // make command idempotent
cmd_getval(cct, cmdmap, "delay", delay);
ostringstream oss;
oss << delay;
- r = cct->_conf->set_val("osd_recovery_delay_start", oss.str().c_str());
+ r = cct->_conf.set_val("osd_recovery_delay_start", oss.str().c_str());
if (r != 0) {
ss << "kick_recovery_wq: error setting "
<< "osd_recovery_delay_start to '" << delay << "': error "
<< r;
goto out;
}
- cct->_conf->apply_changes(NULL);
+ cct->_conf.apply_changes(NULL);
ss << "kicking recovery queue. set osd_recovery_delay_start "
<< "to " << cct->_conf->osd_recovery_delay_start;
}
{
set<string> devnames;
store->get_devices(&devnames);
- uint64_t smart_timeout = cct->_conf->get_val<uint64_t>(
+ uint64_t smart_timeout = cct->_conf.get_val<uint64_t>(
"osd_smart_report_timeout");
// == typedef std::map<std::string, mValue> mObject;
utime_t oldest_secs;
const utime_t now = ceph_clock_now();
auto too_old = now;
- too_old -= cct->_conf->get_val<double>("osd_op_complaint_time");
+ too_old -= cct->_conf.get_val<double>("osd_op_complaint_time");
int slow = 0;
TrackedOpRef oldest_op;
auto count_slow_ops = [&](TrackedOp& op) {
void enqueue_front(OpQueueItem&& qi);
void maybe_inject_dispatch_delay() {
- if (g_conf->osd_debug_inject_dispatch_delay_probability > 0) {
+ if (g_conf()->osd_debug_inject_dispatch_delay_probability > 0) {
if (rand() % 10000 <
- g_conf->osd_debug_inject_dispatch_delay_probability * 10000) {
+ g_conf()->osd_debug_inject_dispatch_delay_probability * 10000) {
utime_t t;
- t.set_from_double(g_conf->osd_debug_inject_dispatch_delay_duration);
+ t.set_from_double(g_conf()->osd_debug_inject_dispatch_delay_duration);
t.sleep();
}
}
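maybe_inject_dispatch_delay above sleeps with a configured probability to shake out ordering bugs; a self-contained variant of the same fault-injection idea (standard-library randomness instead of rand()):

#include <chrono>
#include <random>
#include <thread>

// Illustrative: with probability p, sleep for 'seconds' before dispatching.
void maybe_inject_delay_sketch(double p, double seconds) {
  static thread_local std::mt19937 gen{std::random_device{}()};
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  if (p > 0.0 && dist(gen) < p)
    std::this_thread::sleep_for(std::chrono::duration<double>(seconds));
}

int main() {
  maybe_inject_delay_sketch(0.01, 0.05);  // rarely delays by 50 ms
}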
} else {
// count osds
int maxosd = 0;
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
vector<string> sections;
- conf->get_all_sections(sections);
+ conf.get_all_sections(sections);
for (auto& section : sections) {
if (section.find("osd.") != 0)
pools[pool].set_flag(pg_pool_t::FLAG_NOPGCHANGE);
if (cct->_conf->osd_pool_default_flag_nosizechange)
pools[pool].set_flag(pg_pool_t::FLAG_NOSIZECHANGE);
- pools[pool].size = cct->_conf->get_val<uint64_t>("osd_pool_default_size");
- pools[pool].min_size = cct->_conf->get_osd_pool_default_min_size();
+ pools[pool].size = cct->_conf.get_val<uint64_t>("osd_pool_default_size");
+ pools[pool].min_size = cct->_conf.get_osd_pool_default_min_size();
pools[pool].crush_rule = default_replicated_rule;
pools[pool].object_hash = CEPH_STR_HASH_RJENKINS;
pools[pool].set_pg_num(poolbase << pg_bits);
map<string,string> &profile_map,
ostream *ss)
{
- int r = get_json_str_map(cct->_conf->get_val<string>("osd_pool_default_erasure_code_profile"),
+ int r = get_json_str_map(cct->_conf.get_val<string>("osd_pool_default_erasure_code_profile"),
*ss,
&profile_map);
return r;
CrushWrapper& crush,
ostream *ss)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
crush.create();
// add osds
vector<string> sections;
- conf->get_all_sections(sections);
+ conf.get_all_sections(sections);
for (auto& section : sections) {
if (section.find("osd.") != 0)
vector<string> sectiontmp;
sectiontmp.push_back("osd");
sectiontmp.push_back(section);
- conf->get_val_from_conf_file(sectiontmp, "host", host, false);
- conf->get_val_from_conf_file(sectiontmp, "rack", rack, false);
- conf->get_val_from_conf_file(sectiontmp, "row", row, false);
- conf->get_val_from_conf_file(sectiontmp, "room", room, false);
- conf->get_val_from_conf_file(sectiontmp, "datacenter", dc, false);
- conf->get_val_from_conf_file(sectiontmp, "root", pool, false);
+ conf.get_val_from_conf_file(sectiontmp, "host", host, false);
+ conf.get_val_from_conf_file(sectiontmp, "rack", rack, false);
+ conf.get_val_from_conf_file(sectiontmp, "row", row, false);
+ conf.get_val_from_conf_file(sectiontmp, "room", room, false);
+ conf.get_val_from_conf_file(sectiontmp, "datacenter", dc, false);
+ conf.get_val_from_conf_file(sectiontmp, "root", pool, false);
if (host.length() == 0)
host = "unknownhost";
{
// An osd could configure failsafe ratio, to something different
// but for now assume it is the same here.
- float fsr = g_conf->osd_failsafe_full_ratio;
+ float fsr = g_conf()->osd_failsafe_full_ratio;
if (fsr > 1.0) fsr /= 100;
float fr = get_full_ratio();
float br = get_backfillfull_ratio();
}
// OLD_CRUSH_TUNABLES
- if (g_conf->mon_warn_on_legacy_crush_tunables) {
+ if (g_conf()->mon_warn_on_legacy_crush_tunables) {
string min = crush->get_min_required_version();
- if (min < g_conf->mon_crush_min_required_version) {
+ if (min < g_conf()->mon_crush_min_required_version) {
ostringstream ss;
ss << "crush map has legacy tunables (require " << min
- << ", min is " << g_conf->mon_crush_min_required_version << ")";
+ << ", min is " << g_conf()->mon_crush_min_required_version << ")";
auto& d = checks->add("OLD_CRUSH_TUNABLES", HEALTH_WARN, ss.str());
d.detail.push_back("see http://docs.ceph.com/docs/master/rados/operations/crush-map/#tunables");
}
}
// OLD_CRUSH_STRAW_CALC_VERSION
- if (g_conf->mon_warn_on_crush_straw_calc_version_zero) {
+ if (g_conf()->mon_warn_on_crush_straw_calc_version_zero) {
if (crush->get_straw_calc_version() == 0) {
ostringstream ss;
ss << "crush map has straw_calc_version=0";
}
// CACHE_POOL_NO_HIT_SET
- if (g_conf->mon_warn_on_cache_pools_without_hit_sets) {
+ if (g_conf()->mon_warn_on_cache_pools_without_hit_sets) {
list<string> detail;
for (map<int64_t, pg_pool_t>::const_iterator p = pools.begin();
p != pools.end();
version_t auth_version = auth_info.last_update.version;
version_t candidate_version = shard_info.last_update.version;
if (auth_version > candidate_version &&
- (auth_version - candidate_version) > cct->_conf->get_val<uint64_t>("osd_async_recovery_min_pg_log_entries")) {
+ (auth_version - candidate_version) > cct->_conf.get_val<uint64_t>("osd_async_recovery_min_pg_log_entries")) {
candidates_by_cost.insert(make_pair(auth_version - candidate_version, shard_i));
}
}
} else {
approx_entries = candidate_version - auth_version;
}
- if (approx_entries > cct->_conf->get_val<uint64_t>("osd_async_recovery_min_pg_log_entries")) {
+ if (approx_entries > cct->_conf.get_val<uint64_t>("osd_async_recovery_min_pg_log_entries")) {
candidates_by_cost.insert(make_pair(approx_entries, shard_i));
}
}
osd->clog->debug(oss);
}
- scrubber.preempt_left = cct->_conf->get_val<uint64_t>(
+ scrubber.preempt_left = cct->_conf.get_val<uint64_t>(
"osd_scrub_max_preemptions");
scrubber.preempt_divisor = 1;
break;
break;
}
- scrubber.preempt_left = cct->_conf->get_val<uint64_t>(
+ scrubber.preempt_left = cct->_conf.get_val<uint64_t>(
"osd_scrub_max_preemptions");
scrubber.preempt_divisor = 1;
stringstream ss;
ceph::ErasureCodePluginRegistry::instance().factory(
profile.find("plugin")->second,
- cct->_conf->get_val<std::string>("erasure_code_dir"),
+ cct->_conf.get_val<std::string>("erasure_code_dir"),
ec_profile,
&ec_impl,
&ss);
is_down() ||
is_incomplete() ||
(!is_active() && is_peered());
- if (g_conf->osd_backoff_on_peering && !backoff) {
+ if (g_conf()->osd_backoff_on_peering && !backoff) {
if (is_peering()) {
backoff = true;
}
return;
}
if (can_backoff &&
- (g_conf->osd_backoff_on_degraded ||
- (g_conf->osd_backoff_on_unfound && missing_loc.is_unfound(head)))) {
+ (g_conf()->osd_backoff_on_degraded ||
+ (g_conf()->osd_backoff_on_unfound && missing_loc.is_unfound(head)))) {
add_backoff(session, head, head);
maybe_kick_recovery(head);
} else {
// degraded object?
if (write_ordered && is_degraded_or_backfilling_object(head)) {
- if (can_backoff && g_conf->osd_backoff_on_degraded) {
+ if (can_backoff && g_conf()->osd_backoff_on_degraded) {
add_backoff(session, head, head);
maybe_kick_recovery(head);
} else {
CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
bool skip_data_digest = store->has_builtin_csum() &&
- g_conf->osd_skip_data_digest;
+ g_conf()->osd_skip_data_digest;
utime_t sleeptime;
sleeptime.set_from_double(cct->_conf->osd_debug_deep_scrub_sleep);
} else {
iter->seek_to_first();
}
- int max = g_conf->osd_deep_scrub_keys;
+ int max = g_conf()->osd_deep_scrub_keys;
while (iter->status() == 0 && iter->valid()) {
pos.omap_bytes += iter->value().length();
++pos.omap_keys;
if (b) {
dout(10) << __func__ << " session " << this << " has backoff " << *b
<< " for " << *m << dendl;
- assert(!b->is_acked() || !g_conf->osd_debug_crash_on_ignored_backoff);
+ assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff);
return true;
}
// we may race with ms_handle_reset. it clears session->con before removing
assert(check(oid));
set<string> to_remove;
to_remove.insert(to_object_key(oid));
- if (g_conf->subsys.should_gather<ceph_subsys_osd, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
encode(in, bl);
to_set[to_object_key(oid)] = bl;
dout(20) << __func__ << " " << oid << " " << in.snaps << dendl;
- if (g_conf->subsys.should_gather<ceph_subsys_osd, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_set) {
dout(20) << __func__ << " set " << i.first << dendl;
}
to_remove.insert(to_raw_key(make_pair(*i, oid)));
}
}
- if (g_conf->subsys.should_gather<ceph_subsys_osd, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
++i) {
to_add.insert(to_raw(make_pair(*i, oid)));
}
- if (g_conf->subsys.should_gather<ceph_subsys_osd, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_add) {
dout(20) << __func__ << " set " << i.first << dendl;
}
++i) {
to_remove.insert(to_raw_key(make_pair(*i, oid)));
}
- if (g_conf->subsys.should_gather<ceph_subsys_osd, 20>()) {
+ if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
// prefetch intelligently.
// (watch out, this is big if you use big objects or weird striping)
- uint64_t periods = cct->_conf->get_val<uint64_t>("journaler_prefetch_periods");
+ uint64_t periods = cct->_conf.get_val<uint64_t>("journaler_prefetch_periods");
fetch_len = layout.get_period() * periods;
}
bool Journaler::_write_head_needed()
{
- return last_wrote_head + seconds(cct->_conf->get_val<int64_t>("journaler_write_head_interval"))
+ return last_wrote_head + seconds(cct->_conf.get_val<int64_t>("journaler_write_head_interval"))
< ceph::real_clock::now();
}
{
assert(prezeroing_pos >= flush_pos);
- uint64_t num_periods = cct->_conf->get_val<uint64_t>("journaler_prezero_periods");
+ uint64_t num_periods = cct->_conf.get_val<uint64_t>("journaler_prezero_periods");
/*
* issue zero requests based on write_pos, even though the invariant
* is that we zero ahead of flush_pos.
update_crush_location();
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
initialized = true;
}
initialized = false;
- cct->_conf->remove_observer(this);
+ cct->_conf.remove_observer(this);
map<int,OSDSession*>::iterator p;
while (!osd_sessions.empty()) {
#endif
char path[PATH_MAX];
- snprintf(path, sizeof(path), "%s", g_conf->log_file.c_str());
+ snprintf(path, sizeof(path), "%s", g_conf()->log_file.c_str());
char *last_slash = rindex(path, '/');
if (last_slash == NULL) {
snprintf(profile_name, profile_name_len, "./%s.profile",
- g_conf->name.to_cstr());
+ g_conf()->name.to_cstr());
}
else {
last_slash[1] = '\0';
snprintf(profile_name, profile_name_len, "%s/%s.profile",
- path, g_conf->name.to_cstr());
+ path, g_conf()->name.to_cstr());
}
#if __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
}
char heap_stats[HEAP_PROFILER_STATS_SIZE];
ceph_heap_profiler_stats(heap_stats, sizeof(heap_stats));
- out << g_conf->name << " dumping heap profile now.\n"
+ out << g_conf()->name << " dumping heap profile now.\n"
<< heap_stats;
ceph_heap_profiler_dump("admin request");
} else if (cmd.size() == 1 && cmd[0] == "start_profiler") {
ceph_heap_profiler_start();
- out << g_conf->name << " started profiler";
+ out << g_conf()->name << " started profiler";
} else if (cmd.size() == 1 && cmd[0] == "stop_profiler") {
ceph_heap_profiler_stop();
- out << g_conf->name << " stopped profiler";
+ out << g_conf()->name << " stopped profiler";
} else if (cmd.size() == 1 && cmd[0] == "release") {
ceph_heap_release_free_memory();
- out << g_conf->name << " releasing free RAM back to system.";
+ out << g_conf()->name << " releasing free RAM back to system.";
} else
#endif
if (cmd.size() == 1 && cmd[0] == "stats") {
char heap_stats[HEAP_PROFILER_STATS_SIZE];
ceph_heap_profiler_stats(heap_stats, sizeof(heap_stats));
- out << g_conf->name << " tcmalloc heap stats:"
+ out << g_conf()->name << " tcmalloc heap stats:"
<< heap_stats;
} else {
out << "unknown command " << cmd;
void set_ctx(CephContext *_cct) {
cct = _cct;
lru_window = cct->_conf->rgw_cache_lru_size / 2;
- expiry = std::chrono::seconds(cct->_conf->get_val<uint64_t>(
+ expiry = std::chrono::seconds(cct->_conf.get_val<uint64_t>(
"rgw_cache_expiry_interval"));
}
bool chain_cache_entry(std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
size_t send_status(const int status,
const char* const status_name) override {
if ((204 == status || 304 == status) &&
- ! g_conf->rgw_print_prohibited_content_length) {
+ ! g_conf()->rgw_print_prohibited_content_length) {
action = ContentLengthAction::INHIBIT;
} else {
action = ContentLengthAction::FORWARD;
}
void RGWAsyncRadosProcessor::RGWWQ::_dump_queue() {
- if (!g_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
+ if (!g_conf()->subsys.should_gather<ceph_subsys_rgw, 20>()) {
return;
}
deque<RGWAsyncRadosRequest *>::iterator iter;
RGWAsyncRadosProcessor::RGWAsyncRadosProcessor(RGWRados *_store, int num_threads)
: store(_store), m_tp(store->ctx(), "RGWAsyncRadosProcessor::m_tp", "rados_async", num_threads),
req_throttle(store->ctx(), "rgw_async_rados_ops", num_threads * 2),
- req_wq(this, g_conf->rgw_op_thread_timeout,
- g_conf->rgw_op_thread_suicide_timeout, &m_tp) {
+ req_wq(this, g_conf()->rgw_op_thread_timeout,
+ g_conf()->rgw_op_thread_suicide_timeout, &m_tp) {
}
void RGWAsyncRadosProcessor::start() {
int ret;
RGWBucketInfo& bucket_info = target->get_bucket_info();
RGWRados::Bucket::List list_op(target);
- auto delay_ms = cct->_conf->get_val<int64_t>("rgw_lc_thread_delay");
+ auto delay_ms = cct->_conf.get_val<int64_t>("rgw_lc_thread_delay");
list_op.params.list_versions = false;
/* lifecycle processing does not depend on total order, so can
* take advantage of unorderd listing optimizations--such as
vector<rgw_bucket_dir_entry> objs;
RGWObjectCtx obj_ctx(store);
vector<std::string> result;
- auto delay_ms = cct->_conf->get_val<int64_t>("rgw_lc_thread_delay");
+ auto delay_ms = cct->_conf.get_val<int64_t>("rgw_lc_thread_delay");
boost::split(result, shard_id, boost::is_any_of(":"));
string bucket_tenant = result[0];
string bucket_name = result[1];
rgw_pool meta_pool;
rgw_raw_obj raw_obj;
int max_lock_secs_mp =
- s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");
+ s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
utime_t dur(max_lock_secs_mp, 0);
store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
void RGWGetHealthCheck::execute()
{
- if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
- (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
+ if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
+ (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
/* Disabling path specified & existent in the filesystem. */
op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
} else {
* as ObjectStore::get_max_attr_name_length() can set the limit even
* lower than the "osd_max_attr_name_len" configurable. */
const size_t max_attr_name_len = \
- cct->_conf->get_val<size_t>("rgw_max_attr_name_len");
+ cct->_conf.get_val<size_t>("rgw_max_attr_name_len");
if (max_attr_name_len && attr_name.length() > max_attr_name_len) {
return -ENAMETOOLONG;
}
/* Similar remarks apply to the check for value size. We're veryfing
* it early at the RGW's side as it's being claimed in /info. */
const size_t max_attr_size = \
- cct->_conf->get_val<Option::size_t>("rgw_max_attr_size");
+ cct->_conf.get_val<Option::size_t>("rgw_max_attr_size");
if (max_attr_size && xattr.length() > max_attr_size) {
return -EFBIG;
}
/* Swift allows administrators to limit the number of metadats items
* send _in a single request_. */
const auto rgw_max_attrs_num_in_req = \
- cct->_conf->get_val<size_t>("rgw_max_attrs_num_in_req");
+ cct->_conf.get_val<size_t>("rgw_max_attrs_num_in_req");
if (rgw_max_attrs_num_in_req &&
++valid_meta_count > rgw_max_attrs_num_in_req) {
return -E2BIG;
RGWDataNotifierManager notify_mgr;
uint64_t interval_msec() override {
- return cct->_conf->get_val<int64_t>("rgw_data_notify_interval_msec");
+ return cct->_conf.get_val<int64_t>("rgw_data_notify_interval_msec");
}
void stop_process() override {
notify_mgr.stop();
void init(RGWRados *store) {
store->register_chained_cache(this);
- expiry = std::chrono::seconds(store->ctx()->_conf->get_val<uint64_t>(
+ expiry = std::chrono::seconds(store->ctx()->_conf.get_val<uint64_t>(
"rgw_cache_expiry_interval"));
}
void dump_bucket_from_state(struct req_state *s)
{
- if (g_conf->rgw_expose_bucket && ! s->bucket_name.empty()) {
+ if (g_conf()->rgw_expose_bucket && ! s->bucket_name.empty()) {
if (! s->bucket_tenant.empty()) {
dump_header(s, "Bucket",
url_encode(s->bucket_tenant + "/" + s->bucket_name));
// Map the listing of rgw_enable_apis in REVERSE order, so that items near
// the front of the list have a higher number assigned (and -1 for items not in the list).
list<string> apis;
- get_str_list(g_conf->rgw_enable_apis, apis);
+ get_str_list(g_conf()->rgw_enable_apis, apis);
int api_priority_s3 = -1;
int api_priority_s3website = -1;
auto api_s3website_priority_rawpos = std::find(apis.begin(), apis.end(), "s3website");
<< " in_hosted_domain_s3website=" << in_hosted_domain_s3website
<< dendl;
- if (g_conf->rgw_resolve_cname
+ if (g_conf()->rgw_resolve_cname
&& !in_hosted_domain
&& !in_hosted_domain_s3website) {
string cname;
}
}
- if (g_conf->rgw_print_continue) {
+ if (g_conf()->rgw_print_continue) {
const char *expect = info.env->get("HTTP_EXPECT");
s->expect_cont = (expect && !strcasecmp(expect, "100-continue"));
}
for (auto &it : crypt_http_responses)
dump_header(s, it.first, it.second);
s->formatter->open_object_section("PostResponse");
- if (g_conf->rgw_dns_name.length())
+ if (g_conf()->rgw_dns_name.length())
s->formatter->dump_format("Location", "%s/%s",
s->info.script_uri.c_str(),
s->object.name.c_str());
// read bucket trim configuration from ceph context
void configure_bucket_trim(CephContext *cct, BucketTrimConfig& config)
{
- auto conf = cct->_conf;
+ const auto& conf = cct->_conf;
config.trim_interval_sec =
- conf->get_val<int64_t>("rgw_sync_log_trim_interval");
+ conf.get_val<int64_t>("rgw_sync_log_trim_interval");
config.counter_size = 512;
config.buckets_per_interval =
- conf->get_val<int64_t>("rgw_sync_log_trim_max_buckets");
+ conf.get_val<int64_t>("rgw_sync_log_trim_max_buckets");
config.min_cold_buckets_per_interval =
- conf->get_val<int64_t>("rgw_sync_log_trim_min_cold_buckets");
+ conf.get_val<int64_t>("rgw_sync_log_trim_min_cold_buckets");
config.concurrent_buckets =
- conf->get_val<int64_t>("rgw_sync_log_trim_concurrent_buckets");
+ conf.get_val<int64_t>("rgw_sync_log_trim_concurrent_buckets");
config.notify_timeout_ms = 10000;
config.recent_size = 128;
config.recent_duration = std::chrono::hours(2);
int seed::get_params()
{
is_torrent = true;
- info.piece_length = g_conf->rgw_torrent_sha_unit;
- create_by = g_conf->rgw_torrent_createby;
- encoding = g_conf->rgw_torrent_encoding;
- origin = g_conf->rgw_torrent_origin;
- comment = g_conf->rgw_torrent_comment;
- announce = g_conf->rgw_torrent_tracker;
+ info.piece_length = g_conf()->rgw_torrent_sha_unit;
+ create_by = g_conf()->rgw_torrent_createby;
+ encoding = g_conf()->rgw_torrent_encoding;
+ origin = g_conf()->rgw_torrent_origin;
+ comment = g_conf()->rgw_torrent_comment;
+ announce = g_conf()->rgw_torrent_tracker;
/* tracker and tracker list is empty, set announce to origin */
if (announce.empty() && !origin.empty())
*
*/
#include "common/ConfUtils.h"
-#include "common/config.h"
+#include "common/config_proxy.h"
#include "common/errno.h"
#include "gtest/gtest.h"
#include "include/buffer.h"
}
TEST(ConfUtils, Overrides) {
- md_config_t conf;
+ ConfigProxy conf{false};
std::ostringstream warn;
std::string override_conf_1_f(next_tempfile(override_config_1));
- conf.name.set(CEPH_ENTITY_TYPE_MON, "0");
+ conf->name.set(CEPH_ENTITY_TYPE_MON, "0");
conf.parse_config_files(override_conf_1_f.c_str(), &warn, 0);
ASSERT_EQ(conf.parse_errors.size(), 0U);
- ASSERT_EQ(conf.log_file, "global_log");
+ ASSERT_EQ(conf->log_file, "global_log");
- conf.name.set(CEPH_ENTITY_TYPE_MDS, "a");
+ conf->name.set(CEPH_ENTITY_TYPE_MDS, "a");
conf.parse_config_files(override_conf_1_f.c_str(), &warn, 0);
ASSERT_EQ(conf.parse_errors.size(), 0U);
- ASSERT_EQ(conf.log_file, "mds_log");
+ ASSERT_EQ(conf->log_file, "mds_log");
- conf.name.set(CEPH_ENTITY_TYPE_OSD, "0");
+ conf->name.set(CEPH_ENTITY_TYPE_OSD, "0");
conf.parse_config_files(override_conf_1_f.c_str(), &warn, 0);
ASSERT_EQ(conf.parse_errors.size(), 0U);
- ASSERT_EQ(conf.log_file, "osd0_log");
+ ASSERT_EQ(conf->log_file, "osd0_log");
}
TEST(ConfUtils, DupKey) {
- md_config_t conf;
+ ConfigProxy conf{false};
std::ostringstream warn;
std::string dup_key_config_f(next_tempfile(dup_key_config_1));
- conf.name.set(CEPH_ENTITY_TYPE_MDS, "a");
+ conf->name.set(CEPH_ENTITY_TYPE_MDS, "a");
conf.parse_config_files(dup_key_config_f.c_str(), &warn, 0);
ASSERT_EQ(conf.parse_errors.size(), 0U);
- ASSERT_EQ(conf.log_file, string("3"));
+ ASSERT_EQ(conf->log_file, string("3"));
TEST(DaemonConfig, SimpleSet) {
int ret;
- ret = g_ceph_context->_conf->set_val("log_graylog_port", "21");
+ ret = g_ceph_context->_conf.set_val("log_graylog_port", "21");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("log_graylog_port", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_graylog_port", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("21"), string(buf));
- g_ceph_context->_conf->rm_val("log_graylog_port");
+ g_ceph_context->_conf.rm_val("log_graylog_port");
}
TEST(DaemonConfig, Substitution) {
int ret;
- g_conf->_clear_safe_to_start_threads();
- ret = g_ceph_context->_conf->set_val("host", "foo");
+ g_conf()._clear_safe_to_start_threads();
+ ret = g_ceph_context->_conf.set_val("host", "foo");
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->set_val("public_network", "bar$host.baz");
+ ret = g_ceph_context->_conf.set_val("public_network", "bar$host.baz");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("public_network", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("public_network", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("barfoo.baz"), string(buf));
}
TEST(DaemonConfig, SubstitutionTrailing) {
int ret;
- g_conf->_clear_safe_to_start_threads();
- ret = g_ceph_context->_conf->set_val("host", "foo");
+ g_conf()._clear_safe_to_start_threads();
+ ret = g_ceph_context->_conf.set_val("host", "foo");
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->set_val("public_network", "bar$host");
+ ret = g_ceph_context->_conf.set_val("public_network", "bar$host");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("public_network", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("public_network", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("barfoo"), string(buf));
}
TEST(DaemonConfig, SubstitutionBraces) {
int ret;
- g_conf->_clear_safe_to_start_threads();
- ret = g_ceph_context->_conf->set_val("host", "foo");
+ g_conf()._clear_safe_to_start_threads();
+ ret = g_ceph_context->_conf.set_val("host", "foo");
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->set_val("public_network", "bar${host}baz");
+ ret = g_ceph_context->_conf.set_val("public_network", "bar${host}baz");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("public_network", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("public_network", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("barfoobaz"), string(buf));
}
TEST(DaemonConfig, SubstitutionBracesTrailing) {
int ret;
- g_conf->_clear_safe_to_start_threads();
- ret = g_ceph_context->_conf->set_val("host", "foo");
+ g_conf()._clear_safe_to_start_threads();
+ ret = g_ceph_context->_conf.set_val("host", "foo");
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->set_val("public_network", "bar${host}");
+ ret = g_ceph_context->_conf.set_val("public_network", "bar${host}");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("public_network", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("public_network", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("barfoo"), string(buf));
}
// config: variable substitution happen only once http://tracker.ceph.com/issues/7103
TEST(DaemonConfig, SubstitutionMultiple) {
int ret;
- ret = g_ceph_context->_conf->set_val("mon_host", "localhost");
+ ret = g_ceph_context->_conf.set_val("mon_host", "localhost");
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->set_val("keyring", "$mon_host/$cluster.keyring,$mon_host/$cluster.mon.keyring");
+ ret = g_ceph_context->_conf.set_val("keyring", "$mon_host/$cluster.keyring,$mon_host/$cluster.mon.keyring");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[512];
memset(buf, 0, sizeof(buf));
char *tmp = buf;
- ret = g_ceph_context->_conf->get_val("keyring", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("keyring", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("localhost/ceph.keyring,localhost/ceph.mon.keyring"), tmp);
ASSERT_TRUE(strchr(buf, '$') == NULL);
}
TEST(DaemonConfig, ArgV) {
- g_conf->_clear_safe_to_start_threads();
+ g_conf()._clear_safe_to_start_threads();
int ret;
const char *argv[] = { "foo", "--log-graylog-port", "22",
size_t argc = (sizeof(argv) / sizeof(argv[0])) - 1;
vector<const char*> args;
argv_to_vec(argc, argv, args);
- g_ceph_context->_conf->parse_argv(args);
- g_ceph_context->_conf->apply_changes(NULL);
+ g_ceph_context->_conf.parse_argv(args);
+ g_ceph_context->_conf.apply_changes(nullptr);
char buf[128];
char *tmp = buf;
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("key", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("key", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("my-key"), string(buf));
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_graylog_port", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_graylog_port", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("22"), string(buf));
- g_conf->set_safe_to_start_threads();
+ g_conf().set_safe_to_start_threads();
}
TEST(DaemonConfig, InjectArgs) {
int ret;
std::string injection("--log-graylog-port 56 --leveldb-max-open-files 42");
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
char buf[128];
char *tmp = buf;
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("leveldb_max_open_files", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("leveldb_max_open_files", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("42"), string(buf));
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_graylog_port", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_graylog_port", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("56"), string(buf));
injection = "--log-graylog-port 57";
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
- ret = g_ceph_context->_conf->get_val("log_graylog_port", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_graylog_port", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("57"), string(buf));
}
// We should complain about the garbage in the input
std::string injection("--random-garbage-in-injectargs 26 --log-graylog-port 28");
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(-EINVAL, ret);
// But, debug should still be set...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_graylog_port", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_graylog_port", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("28"), string(buf));
// What's the current value of osd_data?
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("osd_data", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("osd_data", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
// Injectargs shouldn't let us change this, since it is a string-valued
// variable and there isn't an observer for it.
std::string injection2("--osd_data /tmp/some-other-directory --log-graylog-port 4");
- ret = g_ceph_context->_conf->injectargs(injection2, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection2, &cout);
ASSERT_EQ(-ENOSYS, ret);
// It should be unchanged.
memset(buf2, 0, sizeof(buf2));
- ret = g_ceph_context->_conf->get_val("osd_data", &tmp2, sizeof(buf2));
+ ret = g_ceph_context->_conf.get_val("osd_data", &tmp2, sizeof(buf2));
ASSERT_EQ(0, ret);
ASSERT_EQ(string(buf), string(buf2));
// We should complain about the missing arguments.
std::string injection3("--log-graylog-port 28 --debug_ms");
- ret = g_ceph_context->_conf->injectargs(injection3, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection3, &cout);
ASSERT_EQ(-EINVAL, ret);
}
// Change log_to_syslog
std::string injection("--log_to_syslog --log-graylog-port 28");
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
// log_to_syslog should be set...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_to_syslog", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_to_syslog", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("true"), string(buf));
// Turn off log_to_syslog
injection = "--log_to_syslog=false --log-graylog-port 28";
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
// log_to_syslog should be cleared...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_to_syslog", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_to_syslog", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("false"), string(buf));
// Turn on log_to_syslog
injection = "--log-graylog-port=1 --log_to_syslog=true --leveldb-max-open-files 40";
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
// log_to_syslog should be set...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_to_syslog", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_to_syslog", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("true"), string(buf));
// parse error
injection = "--log-graylog-port 1 --log_to_syslog=falsey --leveldb-max-open-files 42";
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(-EINVAL, ret);
// log_to_syslog should still be set...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_to_syslog", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_to_syslog", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("true"), string(buf));
// debug-ms should still become 42...
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("leveldb_max_open_files", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("leveldb_max_open_files", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("42"), string(buf));
}
std::string injection("--log_file ");
injection += tmpfile;
// We're allowed to change log_file because there is an observer.
- ret = g_ceph_context->_conf->injectargs(injection, &cout);
+ ret = g_ceph_context->_conf.injectargs(injection, &cout);
ASSERT_EQ(0, ret);
// It should have taken effect.
char buf[128];
char *tmp = buf;
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("log_file", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("log_file", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string(buf), string(tmpfile));
ASSERT_EQ(0, access(tmpfile, R_OK));
// Let's turn off the logfile.
- ret = g_ceph_context->_conf->set_val("log_file", "");
+ ret = g_ceph_context->_conf.set_val("log_file", "");
ASSERT_EQ(0, ret);
- g_ceph_context->_conf->apply_changes(NULL);
- ret = g_ceph_context->_conf->get_val("log_file", &tmp, sizeof(buf));
+ g_ceph_context->_conf.apply_changes(nullptr);
+ ret = g_ceph_context->_conf.get_val("log_file", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string(""), string(buf));
int ret;
// Verify that we can't change this, since safe_to_start_threads has
// been set.
- ret = g_ceph_context->_conf->set_val("osd_data", "");
+ ret = g_ceph_context->_conf.set_val("osd_data", "");
ASSERT_EQ(-ENOSYS, ret);
- g_conf->_clear_safe_to_start_threads();
+ g_conf()._clear_safe_to_start_threads();
// Ok, now we can change this. Since this is just a test, and there are no
// OSD threads running, we know changing osd_data won't actually blow up the
// world.
- ret = g_ceph_context->_conf->set_val("osd_data", "/tmp/crazydata");
+ ret = g_ceph_context->_conf.set_val("osd_data", "/tmp/crazydata");
ASSERT_EQ(0, ret);
char buf[128];
char *tmp = buf;
memset(buf, 0, sizeof(buf));
- ret = g_ceph_context->_conf->get_val("osd_data", &tmp, sizeof(buf));
+ ret = g_ceph_context->_conf.get_val("osd_data", &tmp, sizeof(buf));
ASSERT_EQ(0, ret);
ASSERT_EQ(string("/tmp/crazydata"), string(buf));
- g_conf->_clear_safe_to_start_threads();
+ g_conf()._clear_safe_to_start_threads();
ASSERT_EQ(0, ret);
}
TEST(DaemonConfig, InvalidIntegers) {
{
- int ret = g_ceph_context->_conf->set_val("log_graylog_port", "rhubarb");
+ int ret = g_ceph_context->_conf.set_val("log_graylog_port", "rhubarb");
ASSERT_EQ(-EINVAL, ret);
}
int64_t max = std::numeric_limits<int64_t>::max();
string str = boost::lexical_cast<string>(max);
str = str + "999"; // some extra digits to take us out of bounds
- int ret = g_ceph_context->_conf->set_val("log_graylog_port", str);
+ int ret = g_ceph_context->_conf.set_val("log_graylog_port", str);
ASSERT_EQ(-EINVAL, ret);
}
- g_ceph_context->_conf->rm_val("log_graylog_port");
+ g_ceph_context->_conf.rm_val("log_graylog_port");
}
TEST(DaemonConfig, InvalidFloats) {
{
double bad_value = 2 * (double)std::numeric_limits<float>::max();
string str = boost::lexical_cast<string>(-bad_value);
- int ret = g_ceph_context->_conf->set_val("log_stop_at_utilization", str);
+ int ret = g_ceph_context->_conf.set_val("log_stop_at_utilization", str);
ASSERT_EQ(-EINVAL, ret);
}
{
double bad_value = 2 * (double)std::numeric_limits<float>::max();
string str = boost::lexical_cast<string>(bad_value);
- int ret = g_ceph_context->_conf->set_val("log_stop_at_utilization", str);
+ int ret = g_ceph_context->_conf.set_val("log_stop_at_utilization", str);
ASSERT_EQ(-EINVAL, ret);
}
{
- int ret = g_ceph_context->_conf->set_val("log_stop_at_utilization", "not a float");
+ int ret = g_ceph_context->_conf.set_val("log_stop_at_utilization", "not a float");
ASSERT_EQ(-EINVAL, ret);
}
}
/*
* Local Variables:
- * compile-command: "cd .. ; make unittest_daemon_config && ./unittest_daemon_config"
+ * compile-command: "cd ../../build ; \
+ * make unittest_daemon_config && ./bin/unittest_daemon_config"
* End:
*/
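// Aside (not part of the patch; illustrative sketch only): the
// add_observer()/remove_observer() hunks elsewhere in this patch (e.g. the
// Objecter and ImageSyncThrottler call sites) follow the same shape: observers
// now register with the proxy object itself. A tiny hypothetical model of that
// pattern, with made-up names:
#include <iostream>
#include <set>
#include <string>

struct Observer {                          // stand-in for md_config_obs_t
  virtual ~Observer() = default;
  virtual void handle_conf_change(const std::string& key) = 0;
};

class MiniProxy {                          // stand-in for the config proxy
  std::set<Observer*> observers;
public:
  void add_observer(Observer* o) { observers.insert(o); }
  void remove_observer(Observer* o) { observers.erase(o); }
  void set_val(const std::string& key) {   // notify observers on a change
    for (auto* o : observers) o->handle_conf_change(key);
  }
};

struct Throttler : Observer {              // registers in ctor, deregisters in dtor
  MiniProxy& conf;
  explicit Throttler(MiniProxy& c) : conf(c) { conf.add_observer(this); }
  ~Throttler() override { conf.remove_observer(this); }
  void handle_conf_change(const std::string& key) override {
    std::cout << "reloading " << key << "\n";
  }
};

int main() {
  MiniProxy conf;
  Throttler t{conf};
  conf.set_val("rbd_mirror_concurrent_image_syncs");
  return 0;
}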
librados::TestRadosClient *create_rados_client() {
CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
- cct->_conf->parse_env();
- cct->_conf->apply_changes(nullptr);
+ cct->_conf.parse_env();
+ cct->_conf.apply_changes(nullptr);
auto rados_client =
librados_test_stub::get_cluster()->create_rados_client(cct);
librados::TestRadosClient *impl =
reinterpret_cast<librados::TestRadosClient*>(cluster);
CephContext *cct = impl->cct();
- return cct->_conf->set_val(option, value);
+ return cct->_conf.set_val(option, value);
}
extern "C" int rados_conf_parse_env(rados_t cluster, const char *var) {
librados::TestRadosClient *client =
reinterpret_cast<librados::TestRadosClient*>(cluster);
- md_config_t *conf = client->cct()->_conf;
- conf->parse_env(var);
- conf->apply_changes(NULL);
+ auto& conf = client->cct()->_conf;
+ conf.parse_env(var);
+ conf.apply_changes(nullptr);
return 0;
}
extern "C" int rados_conf_read_file(rados_t cluster, const char *path) {
librados::TestRadosClient *client =
reinterpret_cast<librados::TestRadosClient*>(cluster);
- md_config_t *conf = client->cct()->_conf;
- int ret = conf->parse_config_files(path, NULL, 0);
+ auto& conf = client->cct()->_conf;
+ int ret = conf.parse_config_files(path, nullptr, 0);
if (ret == 0) {
- conf->parse_env();
- conf->apply_changes(NULL);
- conf->complain_about_parse_errors(client->cct());
+ conf.parse_env();
+ conf.apply_changes(nullptr);
+ conf.complain_about_parse_errors(client->cct());
} else if (ret == -ENOENT) {
// ignore missing client config
return 0;
CephContext *cct = impl->cct();
char *str = NULL;
- int ret = cct->_conf->get_val(option, &str, -1);
+ int ret = cct->_conf.get_val(option, &str, -1);
if (ret != 0) {
free(str);
return ret;
REQUIRE_FEATURE(RBD_FEATURE_JOURNALING);
CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
- REQUIRE(!cct->_conf->get_val<bool>("rbd_skip_partial_discard"));
+ REQUIRE(!cct->_conf.get_val<bool>("rbd_skip_partial_discard"));
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
- REQUIRE(!cct->_conf->get_val<bool>("rbd_skip_partial_discard"));
+ REQUIRE(!cct->_conf.get_val<bool>("rbd_skip_partial_discard"));
m_image_name = get_temp_image_name();
m_image_size = 1 << 14;
// we keep this stuff 'unsafe' out of test case scope to be able to update ANY
// config settings. Hence setting it to 'safe' here to proceed with the test
// case
- g_conf->set_safe_to_start_threads();
+ g_conf().set_safe_to_start_threads();
}
void StoreTestFixture::TearDown()
{
// we keep this stuff 'unsafe' out of test case scope to be able to update ANY
// config settings. Hence setting it to 'unsafe' here as test case is closing.
- g_conf->_clear_safe_to_start_threads();
+ g_conf()._clear_safe_to_start_threads();
PopSettings(0);
if (store) {
int r = store->umount();
"0"));
CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
- std::string policy_type = cct->_conf->get_val<string>("rbd_mirror_image_policy_type");
+ std::string policy_type = cct->_conf.get_val<string>("rbd_mirror_image_policy_type");
if (policy_type == "none" || policy_type == "simple") {
m_policy = image_map::SimplePolicy::create(m_local_io_ctx);
TestImageReplayer()
: m_local_cluster(new librados::Rados()), m_watch_handle(0)
{
- EXPECT_EQ(0, g_ceph_context->_conf->get_val("rbd_mirror_journal_commit_age",
+ EXPECT_EQ(0, g_ceph_context->_conf.get_val("rbd_mirror_journal_commit_age",
&m_journal_commit_age));
- EXPECT_EQ(0, g_ceph_context->_conf->set_val("rbd_mirror_journal_commit_age",
+ EXPECT_EQ(0, g_ceph_context->_conf.set_val("rbd_mirror_journal_commit_age",
"0.1"));
EXPECT_EQ("", connect_cluster_pp(*m_local_cluster.get()));
EXPECT_EQ(0, m_remote_cluster.pool_delete(m_remote_pool_name.c_str()));
EXPECT_EQ(0, m_local_cluster->pool_delete(m_local_pool_name.c_str()));
- EXPECT_EQ(0, g_ceph_context->_conf->set_val("rbd_mirror_journal_commit_age",
+ EXPECT_EQ(0, g_ceph_context->_conf.set_val("rbd_mirror_journal_commit_age",
m_journal_commit_age));
}
#include "common/ConfUtils.h"
#include "common/ceph_argparse.h"
-#include "common/config.h"
+#include "common/config_proxy.h"
#include "global/global_context.h"
#include "global/global_init.h"
}
common_init_finish(g_ceph_context);
- EntityName ename(g_conf->name);
+ EntityName ename(g_conf()->name);
// Enforce the use of gen-key or add-key when creating to avoid ending up
// with an "empty" key (key = AAAAAAAAAAAAAAAA)
const std::map<string,string>& filter_key_value)
{
std::vector <std::string> sections;
- int ret = g_conf->get_all_sections(sections);
+ int ret = g_conf().get_all_sections(sections);
if (ret)
return 2;
for (std::vector<std::string>::const_iterator p = sections.begin();
int r = 0;
for (std::list<string>::const_iterator q = filter_key.begin(); q != filter_key.end(); ++q) {
string v;
- r = g_conf->get_val_from_conf_file(sec, q->c_str(), v, false);
+ r = g_conf().get_val_from_conf_file(sec, q->c_str(), v, false);
if (r < 0)
break;
}
q != filter_key_value.end();
++q) {
string v;
- r = g_conf->get_val_from_conf_file(sec, q->first.c_str(), v, false);
+ r = g_conf().get_val_from_conf_file(sec, q->first.c_str(), v, false);
if (r < 0 || v != q->second) {
r = -1;
break;
for (deque<string>::const_iterator s = sections.begin(); s != sections.end(); ++s) {
my_sections.push_back(*s);
}
- g_conf->get_my_sections(my_sections);
+ g_conf().get_my_sections(my_sections);
std::string val;
- int ret = g_conf->get_val_from_conf_file(my_sections, key.c_str(), val, true);
+ int ret = g_conf().get_val_from_conf_file(my_sections, key.c_str(), val, true);
if (ret == -ENOENT)
return 1;
else if (ret == 0) {
static int dump_all(const string& format)
{
if (format == "" || format == "plain") {
- g_conf->show_config(std::cout);
+ g_conf().show_config(std::cout);
return 0;
} else {
unique_ptr<Formatter> f(Formatter::create(format));
if (f) {
f->open_object_section("ceph-conf");
- g_conf->show_config(f.get());
+ g_conf().show_config(f.get());
f->close_section();
f->flush(std::cout);
return 0;
[](CephContext *p) {p->put();}
};
- g_conf->apply_changes(NULL);
- g_conf->complain_about_parse_errors(g_ceph_context);
+ g_conf().apply_changes(nullptr);
+ g_conf().complain_about_parse_errors(g_ceph_context);
// do not common_init_finish(); do not start threads; do not do any of thing
// wonky things the daemon whose conf we are examining would do (like initialize
monmap.created = ceph_clock_now();
monmap.last_changed = monmap.created;
srand(getpid() + time(0));
- if (g_conf->get_val<uuid_d>("fsid").is_zero()) {
+ if (g_conf().get_val<uuid_d>("fsid").is_zero()) {
monmap.generate_fsid();
cout << me << ": generated fsid " << monmap.fsid << std::endl;
}
if (filter) {
// apply initial members
list<string> initial_members;
- get_str_list(g_conf->mon_initial_members, initial_members);
+ get_str_list(g_conf()->mon_initial_members, initial_members);
if (!initial_members.empty()) {
cout << "initial_members " << initial_members << ", filtering seed monmap" << std::endl;
set<entity_addr_t> removed;
modified = true;
}
- if (!g_conf->get_val<uuid_d>("fsid").is_zero()) {
- monmap.fsid = g_conf->get_val<uuid_d>("fsid");
+ if (!g_conf().get_val<uuid_d>("fsid").is_zero()) {
+ monmap.fsid = g_conf().get_val<uuid_d>("fsid");
cout << me << ": set fsid to " << monmap.fsid << std::endl;
modified = true;
}
}
if (format_specified) {
- int r = g_conf->set_val("rbd_default_format", stringify(format));
+ int r = g_conf().set_val("rbd_default_format", stringify(format));
assert(r == 0);
opts->set(RBD_IMAGE_OPTION_FORMAT, format);
}
void ImageDeleter<I>::remove_images() {
dout(10) << dendl;
- uint64_t max_concurrent_deletions = g_ceph_context->_conf->get_val<uint64_t>(
+ uint64_t max_concurrent_deletions = g_ceph_context->_conf.get_val<uint64_t>(
"rbd_mirror_concurrent_image_deletions");
Mutex::Locker locker(m_lock);
image_deleter::ERROR_RESULT_RETRY_IMMEDIATELY) {
enqueue_failed_delete(&delete_info, r, m_busy_interval);
} else {
- double failed_interval = g_ceph_context->_conf->get_val<double>(
+ double failed_interval = g_ceph_context->_conf.get_val<double>(
"rbd_mirror_delete_retry_interval");
enqueue_failed_delete(&delete_info, r, failed_interval);
}
});
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
- double after = cct->_conf->get_val<double>("rbd_mirror_image_policy_update_throttle_interval");
+ double after = cct->_conf.get_val<double>("rbd_mirror_image_policy_update_throttle_interval");
dout(20) << "scheduling image check update (" << m_timer_task << ")"
<< " after " << after << " second(s)" << dendl;
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
// fetch the updated value of idle timeout for (re)scheduling
- double resched_after = cct->_conf->get_val<double>(
+ double resched_after = cct->_conf.get_val<double>(
"rbd_mirror_image_policy_rebalance_timeout");
if (!resched_after) {
return;
dout(20) << dendl;
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
- std::string policy_type = cct->_conf->get_val<string>("rbd_mirror_image_policy_type");
+ std::string policy_type = cct->_conf.get_val<string>("rbd_mirror_image_policy_type");
if (policy_type == "none" || policy_type == "simple") {
m_policy.reset(image_map::SimplePolicy::create(m_ioctx));
const std::vector<std::string> &instance_ids,
std::vector<std::string> *filtered_instance_ids, bool removal) const {
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
- std::string policy_type = cct->_conf->get_val<string>("rbd_mirror_image_policy_type");
+ std::string policy_type = cct->_conf.get_val<string>("rbd_mirror_image_policy_type");
if (policy_type != "none") {
*filtered_instance_ids = instance_ids;
{
CephContext *cct = static_cast<CephContext *>(m_local->cct());
- double poll_seconds = cct->_conf->get_val<double>(
+ double poll_seconds = cct->_conf.get_val<double>(
"rbd_mirror_journal_poll_age");
Mutex::Locker locker(m_lock);
m_work_queue(work_queue), m_instance_watcher(instance_watcher),
m_progress_ctx(progress_ctx),
m_lock(unique_lock_name("ImageSync::m_lock", this)),
- m_update_sync_point_interval(m_local_image_ctx->cct->_conf->template get_val<double>(
+ m_update_sync_point_interval(m_local_image_ctx->cct->_conf.template get_val<double>(
"rbd_mirror_sync_point_update_age")), m_client_meta_copy(*client_meta) {
}
ImageSyncThrottler<I>::ImageSyncThrottler()
: m_lock(librbd::util::unique_lock_name("rbd::mirror::ImageSyncThrottler",
this)),
- m_max_concurrent_syncs(g_ceph_context->_conf->get_val<uint64_t>(
+ m_max_concurrent_syncs(g_ceph_context->_conf.get_val<uint64_t>(
"rbd_mirror_concurrent_image_syncs")) {
dout(20) << "max_concurrent_syncs=" << m_max_concurrent_syncs << dendl;
- g_ceph_context->_conf->add_observer(this);
+ g_ceph_context->_conf.add_observer(this);
}
template <typename I>
ImageSyncThrottler<I>::~ImageSyncThrottler() {
- g_ceph_context->_conf->remove_observer(this);
+ g_ceph_context->_conf.remove_observer(this);
Mutex::Locker locker(m_lock);
assert(m_inflight_ops.empty());
queue_start_image_replayers();
});
- int after = g_ceph_context->_conf->get_val<int64_t>(
+ int after = g_ceph_context->_conf.get_val<int64_t>(
"rbd_mirror_image_state_check_interval");
dout(10) << "scheduling image state check after " << after << " sec (task "
m_lock(unique_lock_name("rbd::mirror::InstanceWatcher::m_lock", this)),
m_instance_lock(librbd::ManagedLock<I>::create(
m_ioctx, m_work_queue, m_oid, this, librbd::managed_lock::EXCLUSIVE, true,
- m_cct->_conf->get_val<int64_t>("rbd_blacklist_expire_seconds"))) {
+ m_cct->_conf.get_val<int64_t>("rbd_blacklist_expire_seconds"))) {
}
template <typename I>
return;
}
- int after = m_cct->_conf->get_val<int64_t>("rbd_mirror_leader_heartbeat_interval") *
- (1 + m_cct->_conf->get_val<int64_t>("rbd_mirror_leader_max_missed_heartbeats") +
- m_cct->_conf->get_val<int64_t>("rbd_mirror_leader_max_acquire_attempts_before_break"));
+ int after = m_cct->_conf.get_val<int64_t>("rbd_mirror_leader_heartbeat_interval") *
+ (1 + m_cct->_conf.get_val<int64_t>("rbd_mirror_leader_max_missed_heartbeats") +
+ m_cct->_conf.get_val<int64_t>("rbd_mirror_leader_max_acquire_attempts_before_break"));
bool schedule = false;
utime_t oldest_time = time;
m_notifier_id(librados::Rados(io_ctx).get_instance_id()),
m_instance_id(stringify(m_notifier_id)),
m_leader_lock(new LeaderLock(m_ioctx, m_work_queue, m_oid, this, true,
- m_cct->_conf->get_val<int64_t>(
+ m_cct->_conf.get_val<int64_t>(
"rbd_blacklist_expire_seconds"))) {
}
m_timer_gate->timer_callback = timer_callback;
});
- int after = delay_factor * m_cct->_conf->get_val<int64_t>(
+ int after = delay_factor * m_cct->_conf.get_val<int64_t>(
"rbd_mirror_leader_heartbeat_interval");
dout(10) << "scheduling " << name << " after " << after << " sec (task "
}
}
- if (m_acquire_attempts >= m_cct->_conf->get_val<int64_t>(
+ if (m_acquire_attempts >= m_cct->_conf.get_val<int64_t>(
"rbd_mirror_leader_max_acquire_attempts_before_break")) {
dout(0) << "breaking leader lock after " << m_acquire_attempts << " "
<< "failed attempts to acquire" << dendl;
schedule_timer_task("acquire leader lock",
delay_factor *
- m_cct->_conf->get_val<int64_t>("rbd_mirror_leader_max_missed_heartbeats"),
+ m_cct->_conf.get_val<int64_t>("rbd_mirror_leader_max_missed_heartbeats"),
false, &LeaderWatcher<I>::acquire_leader_lock, false);
}
}
m_cond.WaitInterval(
m_lock,
- utime_t(m_cct->_conf->get_val<int64_t>("rbd_mirror_pool_replayers_refresh_interval"), 0));
+ utime_t(m_cct->_conf.get_val<int64_t>("rbd_mirror_pool_replayers_refresh_interval"), 0));
}
// stop all pool replayers in parallel
cct->_conf->cluster = cluster_name;
// librados::Rados::conf_read_file
- int r = cct->_conf->parse_config_files(nullptr, nullptr, 0);
+ int r = cct->_conf.parse_config_files(nullptr, nullptr, 0);
if (r < 0) {
derr << "could not read ceph conf for " << description << ": "
<< cpp_strerror(r) << dendl;
// remote peer connections shouldn't apply cluster-specific
// configuration settings
for (auto& key : UNIQUE_PEER_CONFIG_KEYS) {
- config_values[key] = cct->_conf->get_val<std::string>(key);
+ config_values[key] = cct->_conf.get_val<std::string>(key);
}
}
- cct->_conf->parse_env();
+ cct->_conf.parse_env();
// librados::Rados::conf_parse_env
std::vector<const char*> args;
- r = cct->_conf->parse_argv(args);
+ r = cct->_conf.parse_argv(args);
if (r < 0) {
derr << "could not parse environment for " << description << ":"
<< cpp_strerror(r) << dendl;
cct->put();
return r;
}
- cct->_conf->parse_env();
+ cct->_conf.parse_env();
if (!m_args.empty()) {
// librados::Rados::conf_parse_argv
args = m_args;
- r = cct->_conf->parse_argv(args);
+ r = cct->_conf.parse_argv(args);
if (r < 0) {
derr << "could not parse command line args for " << description << ": "
<< cpp_strerror(r) << dendl;
// remote peer connections shouldn't apply cluster-specific
// configuration settings
for (auto& pair : config_values) {
- auto value = cct->_conf->get_val<std::string>(pair.first);
+ auto value = cct->_conf.get_val<std::string>(pair.first);
if (pair.second != value) {
dout(0) << "reverting global config option override: "
<< pair.first << ": " << value << " -> " << pair.second
<< dendl;
- cct->_conf->set_val_or_die(pair.first, pair.second);
+ cct->_conf.set_val_or_die(pair.first, pair.second);
}
}
}
if (!g_ceph_context->_conf->admin_socket.empty()) {
- cct->_conf->set_val_or_die("admin_socket",
+ cct->_conf.set_val_or_die("admin_socket",
"$run_dir/$name.$pid.$cluster.$cctid.asok");
}
// disable unnecessary librbd cache
- cct->_conf->set_val_or_die("rbd_cache", "false");
- cct->_conf->apply_changes(nullptr);
- cct->_conf->complain_about_parse_errors(cct);
+ cct->_conf.set_val_or_die("rbd_cache", "false");
+ cct->_conf.apply_changes(nullptr);
+ cct->_conf.complain_about_parse_errors(cct);
r = (*rados_ref)->init_with_context(cct);
assert(r == 0);
}
f->dump_string("local_cluster_admin_socket",
- reinterpret_cast<CephContext *>(m_local_io_ctx.cct())->_conf->
+ reinterpret_cast<CephContext *>(m_local_io_ctx.cct())->_conf.
get_val<std::string>("admin_socket"));
f->dump_string("remote_cluster_admin_socket",
- reinterpret_cast<CephContext *>(m_remote_io_ctx.cct())->_conf->
+ reinterpret_cast<CephContext *>(m_remote_io_ctx.cct())->_conf.
get_val<std::string>("admin_socket"));
f->open_object_section("sync_throttler");
template <typename I>
Threads<I>::Threads(CephContext *cct) : timer_lock("Threads::timer_lock") {
thread_pool = new ThreadPool(cct, "Journaler::thread_pool", "tp_journal",
- cct->_conf->get_val<int64_t>("rbd_op_threads"),
+ cct->_conf.get_val<int64_t>("rbd_op_threads"),
"rbd_op_threads");
thread_pool->start();
work_queue = new ContextWQ("Journaler::work_queue",
- cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
+ cct->_conf.get_val<int64_t>("rbd_op_thread_timeout"),
thread_pool);
timer = new SafeTimer(cct, timer_lock, true);
assert(m_map_lock.is_locked());
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
- int migration_throttle = cct->_conf->get_val<int64_t>(
+ int migration_throttle = cct->_conf.get_val<int64_t>(
"rbd_mirror_image_policy_migration_throttle");
auto it = m_image_states.find(global_image_id);
// 3. Don't set the data pool explicitly.
std::string data_pool;
librados::Rados local_rados(m_local_io_ctx);
- auto default_data_pool = g_ceph_context->_conf->get_val<std::string>("rbd_default_data_pool");
+ auto default_data_pool = g_ceph_context->_conf.get_val<std::string>("rbd_default_data_pool");
auto remote_md_pool = m_remote_image_ctx->md_ctx.get_pool_name();
auto remote_data_pool = m_remote_image_ctx->data_ctx.get_pool_name();
dout(20) << dendl;
journal::Settings settings;
- settings.commit_interval = g_ceph_context->_conf->get_val<double>(
+ settings.commit_interval = g_ceph_context->_conf.get_val<double>(
"rbd_mirror_journal_commit_age");
- settings.max_fetch_bytes = g_ceph_context->_conf->get_val<Option::size_t>(
+ settings.max_fetch_bytes = g_ceph_context->_conf.get_val<Option::size_t>(
"rbd_mirror_journal_max_fetch_bytes");
assert(*m_remote_journaler == nullptr);
CephInitParameters iparams = ceph_argparse_early_args(
args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
- md_config_t config;
- config.name = iparams.name;
- config.cluster = cluster;
+ ConfigProxy config{false};
+ config->name = iparams.name;
+ config->cluster = cluster;
if (!conf_file_list.empty()) {
config.parse_config_files(conf_file_list.c_str(), nullptr, 0);