* change "mon_osd_min_down_reporters" to unsigned
* change "osd_pool_default_size" to unsigned
* change "osd_pool_default_min_size" to unsigned
* change "osd_pool_default_pg_num" to unsigned
* change "osd_pool_default_pgp_num" to unsigned
* also mark "mon_osd_reporter_subtree_level" safe
* drop the legacy OPTION() entries for the options above, plus
  "osd_pool_default_crush_rule", "osd_pool_default_type" and
  "osd_pool_default_erasure_code_profile", and switch their remaining
  call sites to get_val<>()

Signed-off-by: Kefu Chai <kchai@redhat.com>
public:
unsigned get_osd_pool_default_min_size() const {
- return osd_pool_default_min_size ?
- MIN(osd_pool_default_min_size, osd_pool_default_size) :
- osd_pool_default_size - osd_pool_default_size / 2;
+ auto min_size = get_val<uint64_t>("osd_pool_default_min_size");
+ auto size = get_val<uint64_t>("osd_pool_default_size");
+ return min_size ? std::min(min_size, size) : (size - size / 2);
}
/** A lock that protects the md_config_t internals. It is
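
Quick sanity check of the fallback above, standalone and not part of the change: it is the same integer arithmetic as the helper, and std::min also sidesteps the double argument evaluation of a classic MIN macro.

    #include <cassert>
    #include <cstdint>

    int main() {
      auto fallback = [](uint64_t size) { return size - size / 2; };
      assert(fallback(1) == 1);  // single copy: writes need the one replica
      assert(fallback(2) == 1);  // size=2: keep writing with one replica down
      assert(fallback(3) == 2);  // size=3: the usual replicated-pool default
    }
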
OPTION(mon_sync_max_payload_size, OPT_U32) // max size for a sync chunk payload (say)
OPTION(mon_sync_debug, OPT_BOOL) // enable sync-specific debug
OPTION(mon_inject_sync_get_chunk_delay, OPT_DOUBLE) // inject N second delay on each get_chunk request
-OPTION(mon_osd_min_down_reporters, OPT_INT) // number of OSDs from different subtrees who need to report a down OSD for it to count
-OPTION(mon_osd_reporter_subtree_level, OPT_STR) // in which level of parent bucket the reporters are counted
OPTION(mon_osd_force_trim_to, OPT_INT) // force mon to trim maps to this point, regardless of min_last_epoch_clean (dangerous)
OPTION(mon_mds_force_trim_to, OPT_INT) // force mon to trim mdsmaps to this point (dangerous)
OPTION(mon_mds_skip_sanity, OPT_BOOL) // skip safety assertions on FSMap (in case of bugs where we want to continue anyway)
OPTION(osd_crush_update_on_start, OPT_BOOL)
OPTION(osd_class_update_on_start, OPT_BOOL) // automatically set device class on start
OPTION(osd_crush_initial_weight, OPT_DOUBLE) // if >=0, the initial weight is for newly added osds.
-OPTION(osd_pool_default_crush_rule, OPT_INT)
OPTION(osd_pool_erasure_code_stripe_unit, OPT_U32) // in bytes
-OPTION(osd_pool_default_size, OPT_INT)
-OPTION(osd_pool_default_min_size, OPT_INT) // 0 means no specific default; ceph will use size-size/2
-OPTION(osd_pool_default_pg_num, OPT_INT) // number of PGs for new pools. Configure in global or mon section of ceph.conf
-OPTION(osd_pool_default_pgp_num, OPT_INT) // number of PGs for placement purposes. Should be equal to pg_num
-OPTION(osd_pool_default_type, OPT_STR)
-OPTION(osd_pool_default_erasure_code_profile, OPT_STR) // default properties of osd pool create
OPTION(osd_erasure_code_plugins, OPT_STR) // list of erasure code plugins
// Allows the "peered" state for recovery and backfill below min_size
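
Dropping the OPTION() entries removes the corresponding generated md_config_t members, so any leftover direct field access no longer compiles; every hunk below applies the same replacement pattern (shown here as a fragment, not standalone code):

    // before: legacy member generated by OPTION(osd_pool_default_size, OPT_INT)
    //   int size = g_conf->osd_pool_default_size;
    // after: typed lookup; TYPE_UINT options must be read as uint64_t
    auto size = g_conf->get_val<uint64_t>("osd_pool_default_size");
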
.set_default(0)
.set_description(""),
- Option("mon_osd_min_down_reporters", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+ Option("mon_osd_min_down_reporters", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(2)
- .set_description(""),
+ .add_service("mon")
+ .set_description("number of OSDs from different subtrees who need to report a down OSD for it to count")
+ .add_see_also("mon_osd_reporter_subtree_level"),
Option("mon_osd_reporter_subtree_level", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("host")
- .set_description(""),
+ .add_service("mon")
+ .set_safe()
+ .set_description("in which level of parent bucket the reporters are counted"),
Option("mon_osd_force_trim_to", Option::TYPE_INT, Option::LEVEL_ADVANCED)
.set_default(0)
.set_default(4_K)
.set_description(""),
- Option("osd_pool_default_size", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+ Option("osd_pool_default_size", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(3)
- .set_description(""),
+ .set_description("the number of copies of an object")
+ .add_service("mon"),
- Option("osd_pool_default_min_size", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+ Option("osd_pool_default_min_size", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(0)
- .set_description(""),
+ .set_description("the minimal number of copies allowed to write to a degraded pool")
+ .set_long_description("0 means no specific default; ceph will use size-size/2")
+ .add_see_also("osd_pool_default_size")
+ .add_service("mon"),
- Option("osd_pool_default_pg_num", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+ Option("osd_pool_default_pg_num", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(8)
- .set_description(""),
+ .set_description("number of PGs for new pools. Configure in global or mon section of ceph.conf")
+ .add_service("mon"),
- Option("osd_pool_default_pgp_num", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+ Option("osd_pool_default_pgp_num", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(8)
- .set_description(""),
+ .set_description("number of PGs for placement purposes. Should be equal to pg_num")
+ .add_see_also("osd_pool_default_pg_num")
+ .add_service("mon"),
Option("osd_pool_default_type", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("replicated")
+ .set_enum_allowed({"replicated", "erasure"})
.set_description(""),
Option("osd_pool_default_erasure_code_profile", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("plugin=jerasure technique=reed_sol_van k=2 m=1")
- .set_description(""),
+ .set_description("default properties of osd pool create"),
Option("osd_erasure_code_plugins", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("jerasure lrc"
*/
int CrushWrapper::get_osd_pool_default_crush_replicated_ruleset(CephContext *cct)
{
- int crush_ruleset = cct->_conf->osd_pool_default_crush_rule;
+ int crush_ruleset = cct->_conf->get_val<int64_t>("osd_pool_default_crush_rule");
if (crush_ruleset < 0) {
crush_ruleset = find_first_ruleset(pg_pool_t::TYPE_REPLICATED);
} else if (!ruleset_exists(crush_ruleset)) {
}
set<string> reporters_by_subtree;
- string reporter_subtree_level = g_conf->mon_osd_reporter_subtree_level;
+ auto reporter_subtree_level = g_conf->get_val<string>("mon_osd_reporter_subtree_level");
utime_t orig_grace(g_conf->osd_heartbeat_grace, 0);
utime_t max_failed_since = fi.get_failed_since();
utime_t failed_for = now - max_failed_since;
<< dendl;
if (failed_for >= grace &&
- (int)reporters_by_subtree.size() >= g_conf->mon_osd_min_down_reporters) {
+ reporters_by_subtree.size() >= g_conf->get_val<uint64_t>("mon_osd_min_down_reporters")) {
dout(1) << " we have enough reporters to mark osd." << target_osd
<< " down" << dendl;
pending_inc.new_state[target_osd] = CEPH_OSD_UP;
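
For context on the unsigned comparison above: reporters_by_subtree keeps one entry per distinct CRUSH subtree at mon_osd_reporter_subtree_level ("host" by default), and it is that set's size() which is checked against the option. A standalone toy illustration with a hypothetical OSD-to-host map (not Ceph code):

    #include <map>
    #include <set>
    #include <string>

    int main() {
      // two reporters on hostA collapse into a single subtree entry
      std::map<int, std::string> host_of = {{1, "hostA"}, {2, "hostA"}, {3, "hostB"}};
      std::set<std::string> reporters_by_subtree;
      for (auto osd : {1, 2, 3})
        reporters_by_subtree.insert(host_of[osd]);
      return reporters_by_subtree.size() >= 2 ? 0 : 1;  // meets the default of 2
    }
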
map<string,string> *erasure_code_profile_map,
ostream *ss)
{
- int r = get_json_str_map(g_conf->osd_pool_default_erasure_code_profile,
+ int r = get_json_str_map(g_conf->get_val<string>("osd_pool_default_erasure_code_profile"),
*ss,
erasure_code_profile_map);
if (r)
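
For reference, the default profile string is plain key=value pairs rather than JSON; get_json_str_map falls back to plain parsing, so with the shipped default the map should come back roughly as:

    // {"k": "2", "m": "1", "plugin": "jerasure", "technique": "reed_sol_van"}
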
int err = 0;
switch (pool_type) {
case pg_pool_t::TYPE_REPLICATED:
- *size = g_conf->osd_pool_default_size;
+ *size = g_conf->get_val<uint64_t>("osd_pool_default_size");
*min_size = g_conf->get_osd_pool_default_min_size();
break;
case pg_pool_t::TYPE_ERASURE:
{
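
Note that min_size deliberately keeps the get_osd_pool_default_min_size() helper rather than a raw get_val: the option defaults to 0, and only the helper maps 0 onto the size - size/2 fallback (see the sanity check after the first hunk). As a fragment, with the shipped defaults:

    // g_conf->get_val<uint64_t>("osd_pool_default_min_size")  == 0
    // g_conf->get_osd_pool_default_min_size()                 == 2
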
if (name.length() == 0)
return -EINVAL;
if (pg_num == 0)
- pg_num = g_conf->osd_pool_default_pg_num;
+ pg_num = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
if (pgp_num == 0)
- pgp_num = g_conf->osd_pool_default_pgp_num;
+ pgp_num = g_conf->get_val<uint64_t>("osd_pool_default_pgp_num");
if (pg_num > (unsigned)g_conf->mon_max_pool_pg_num) {
*ss << "'pg_num' must be greater than 0 and less than or equal to "
<< g_conf->mon_max_pool_pg_num
string pool_type_str;
cmd_getval(cct, cmdmap, "pool_type", pool_type_str);
if (pool_type_str.empty())
- pool_type_str = g_conf->osd_pool_default_type;
+ pool_type_str = g_conf->get_val<string>("osd_pool_default_type");
string poolstr;
cmd_getval(cct, cmdmap, "pool", poolstr);
pools[pool].set_flag(pg_pool_t::FLAG_NOPGCHANGE);
if (cct->_conf->osd_pool_default_flag_nosizechange)
pools[pool].set_flag(pg_pool_t::FLAG_NOSIZECHANGE);
- pools[pool].size = cct->_conf->osd_pool_default_size;
+ pools[pool].size = cct->_conf->get_val<uint64_t>("osd_pool_default_size");
pools[pool].min_size = cct->_conf->get_osd_pool_default_min_size();
pools[pool].crush_rule = default_replicated_rule;
pools[pool].object_hash = CEPH_STR_HASH_RJENKINS;
map<string,string> &profile_map,
ostream *ss)
{
- int r = get_json_str_map(cct->_conf->osd_pool_default_erasure_code_profile,
+ int r = get_json_str_map(cct->_conf->get_val<string>("osd_pool_default_erasure_code_profile"),
*ss,
&profile_map);
return r;
const uint64_t pool = Collection::MIN_POOL_ID + td->thread_number;
// create a collection for each object, up to osd_pool_default_pg_num
- uint32_t count = g_conf->osd_pool_default_pg_num;
+ uint32_t count = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
if (count > td->o.nr_files)
count = td->o.nr_files;
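
One caveat for callers not touched here: get_val's template argument has to match the declared option type exactly. A hedged fragment, assuming the typed-variant behaviour of md_config_t::get_val (the exact failure mode may differ by release):

    // ok: TYPE_UINT options are stored and fetched as uint64_t
    auto pgs = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
    // not ok: requesting the old signed type is a runtime type error,
    // not a silent conversion
    // auto bad = g_conf->get_val<int64_t>("osd_pool_default_pg_num");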