OPTION(osd_client_message_size_cap, OPT_U64, 500*1024L*1024L) // client data allowed in-memory (in bytes)
OPTION(osd_pg_bits, OPT_INT, 6) // pg bits per osd
OPTION(osd_pgp_bits, OPT_INT, 6) // pgp bits per osd
+OPTION(osd_crush_chooseleaf_type, OPT_INT, 1) // bucket type for the default rule's chooseleaf step: 1 = host, 0 = spread across osds
OPTION(osd_min_rep, OPT_INT, 1)
OPTION(osd_max_rep, OPT_INT, 10)
OPTION(osd_pool_default_crush_rule, OPT_INT, 0)
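The new option controls the bucket type used by the default rule's placement step: with the default of 1, replicas are separated across hosts; with 0, they are spread across individual osds. As a rough sketch of the result in crushtool -d text form (rule and bucket names are illustrative):

    rule data {
    	ruleset 0
    	type replicated
    	min_size 1
    	max_size 10
    	step take default
    	step chooseleaf firstn 0 type host
    	step emit
    }

With osd crush chooseleaf type = 0, the middle step instead decompiles to a plain "step choose firstn 0 type osd".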
crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REP, minrep, maxrep);
assert(rule);
crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0);
- // just spread across osds
- crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSE_FIRSTN, CRUSH_CHOOSE_N, 0);
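+ // separate replicas across buckets of the configured chooseleaf type, or across plain osds if the type is 0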
+ crush_rule_set_step(rule, 1,
+ cct->_conf->osd_crush_chooseleaf_type ? CRUSH_RULE_CHOOSE_LEAF_FIRSTN : CRUSH_RULE_CHOOSE_FIRSTN,
+ CRUSH_CHOOSE_N,
+ cct->_conf->osd_crush_chooseleaf_type);
crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
int rno = crush_add_rule(crush.crush, rule, -1);
crush.set_rule_name(rno, p->second);
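The same three-step rule can be built directly against the libcrush builder API. A minimal sketch, assuming crush/builder.h from the Ceph tree; add_default_rule and root_id are hypothetical names, and the literal 1 stands in for pg_pool_t::TYPE_REP:

    #include "crush/crush.h"
    #include "crush/builder.h"

    /* Build a 3-step replicated rule: take root, choose(leaf), emit.
     * chooseleaf_type mirrors osd_crush_chooseleaf_type: 0 picks osds
     * directly, nonzero descends to that bucket type first. */
    static int add_default_rule(struct crush_map *map, int ruleset,
                                int root_id, int chooseleaf_type)
    {
    	struct crush_rule *rule =
    		crush_make_rule(3, ruleset, 1 /* replicated */, 1, 10);
    	if (!rule)
    		return -1;
    	crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, root_id, 0);
    	if (chooseleaf_type)
    		crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSE_LEAF_FIRSTN,
    				    CRUSH_CHOOSE_N, chooseleaf_type);
    	else
    		crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSE_FIRSTN,
    				    CRUSH_CHOOSE_N, 0);
    	crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
    	return crush_add_rule(map, rule, -1); /* -1 picks a free rule id */
    }

A vstart.sh-style test cluster runs all of its osds on a single host, so the default of separating replicas across hosts would leave PGs undersized; the script therefore sets the type to 0: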
[global]
osd pg bits = 3
osd pgp bits = 5 ; (invalid, but ceph should cope!)
+ osd crush chooseleaf type = 0
osd pool default min size = 1
EOF
if [ "$cephx" -eq 1 ] ; then