// Legacy (pre-options.cc) monitor OSD config options.
// NOTE(review): the original text contained a leaked unified-diff marker:
// the mon_osd_down_out_subtree_limit OPTION line was prefixed with '-',
// i.e. it is removed here because the same patch re-declares it as a
// new-style Option (TYPE_STR, LEVEL_ADVANCED, FLAG_RUNTIME) in options.cc.
OPTION(mon_osd_auto_mark_new_in, OPT_BOOL) // mark booting new osds 'in'
OPTION(mon_osd_destroyed_out_interval, OPT_INT) // seconds
OPTION(mon_osd_down_out_interval, OPT_INT) // seconds
OPTION(mon_osd_min_up_ratio, OPT_DOUBLE) // min osds required to be up to mark things down
OPTION(mon_osd_min_in_ratio, OPT_DOUBLE) // min osds required to be in to mark things out
OPTION(mon_osd_warn_op_age, OPT_DOUBLE) // max op age before we generate a warning (make it a power of 2)
// New-style declaration of mon_osd_down_out_subtree_limit: the smallest CRUSH
// subtree type (e.g. "rack") that will NOT be automatically marked 'out' when
// it is entirely down.
// FLAG_RUNTIME makes the option changeable without a mon restart; consumers
// must therefore read it via g_conf().get_val<> rather than a cached field.
// (The leaked '+' diff marker on the set_flag line has been resolved.)
Option("mon_osd_down_out_subtree_limit", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("rack")
.set_flag(Option::FLAG_RUNTIME)
.add_service("mon")
.set_description("do not automatically mark OSDs 'out' if an entire subtree of this size is down")
.add_see_also("mon_osd_down_out_interval"),
* ratio set by g_conf()->mon_osd_min_in_ratio. So it's not really up to us.
*/
if (can_mark_out(-1)) {
+ string down_out_subtree_limit = g_conf().get_val<string>(
+ "mon_osd_down_out_subtree_limit");
set<int> down_cache; // quick cache of down subtrees
map<int,utime_t>::iterator i = down_pending_out.begin();
}
// is this an entire large subtree down?
- if (g_conf()->mon_osd_down_out_subtree_limit.length()) {
- int type = osdmap.crush->get_type_id(g_conf()->mon_osd_down_out_subtree_limit);
+ if (down_out_subtree_limit.length()) {
+ int type = osdmap.crush->get_type_id(down_out_subtree_limit);
if (type > 0) {
if (osdmap.containing_subtree_is_down(cct, o, type, &down_cache)) {
- dout(10) << "tick entire containing " << g_conf()->mon_osd_down_out_subtree_limit
- << " subtree for osd." << o << " is down; resetting timer" << dendl;
+ dout(10) << "tick entire containing " << down_out_subtree_limit
+ << " subtree for osd." << o
+ << " is down; resetting timer" << dendl;
// reset timer, too.
down_pending_out[o] = now;
continue;