From a10fc025d77406b74a220ef44621525cd3c79b80 Mon Sep 17 00:00:00 2001 From: Loic Dachary Date: Wed, 25 Dec 2013 13:19:56 +0100 Subject: [PATCH] osd: build_simple creates a single rule The three rules created by build_simple are identical. They are replaced by a single rule named replicated_rule which is set to be used by the data, rbd and metadata pools. Instead of hardcoding the ruleset number to zero, it is read from osd_pool_default_crush_rule which defaults to zero. The CEPH_DEFAULT_CRUSH_REPLICATED_RULESET enum is moved from osd_types.h to config.h because it may be needed when osd_types.h is not included. Signed-off-by: Loic Dachary --- qa/workunits/cephtool/test.sh | 2 +- src/common/config.h | 4 +++ src/common/config_opts.h | 2 +- src/osd/OSDMap.cc | 49 +++++++++++++------------- src/osd/OSDMap.h | 6 ++-- src/osd/osd_types.h | 7 ---- src/test/cli/osdmaptool/clobber.t | 8 ++--- src/test/cli/osdmaptool/create-print.t | 4 +-- src/test/cli/osdmaptool/create-racks.t | 18 +++++++++- 9 files changed, 56 insertions(+), 44 deletions(-) diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh index 49219aad11837..ae410c72616da 100755 --- a/qa/workunits/cephtool/test.sh +++ b/qa/workunits/cephtool/test.sh @@ -359,7 +359,7 @@ ceph osd pool set rbd hit_set_period 123 ceph osd pool set rbd hit_set_count 12 ceph osd pool set rbd hit_set_fpp .01 -ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 2' +ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0' ceph osd thrash 10 diff --git a/src/common/config.h b/src/common/config.h index 08ae660b92b8e..4ba1cb9b87886 100644 --- a/src/common/config.h +++ b/src/common/config.h @@ -29,6 +29,10 @@ extern struct ceph_file_layout g_default_file_layout; #include "common/config_obs.h" #include "msg/msg_types.h" +enum { + CEPH_DEFAULT_CRUSH_REPLICATED_RULESET, +}; + #define OSD_REP_PRIMARY 0 #define OSD_REP_SPLAY 1 #define OSD_REP_CHAIN 2 diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index 5f6ecdd78d420..d2296b41f34cb 100644 --- a/src/common/config_opts.h +++ b/src/common/config_opts.h @@ -395,7 +395,7 @@ OPTION(osd_pgp_bits, OPT_INT, 6) // bits per osd OPTION(osd_crush_chooseleaf_type, OPT_INT, 1) // 1 = host OPTION(osd_min_rep, OPT_INT, 1) OPTION(osd_max_rep, OPT_INT, 10) -OPTION(osd_pool_default_crush_rule, OPT_INT, 0) +OPTION(osd_pool_default_crush_rule, OPT_INT, CEPH_DEFAULT_CRUSH_REPLICATED_RULESET) OPTION(osd_pool_default_size, OPT_INT, 3) OPTION(osd_pool_default_min_size, OPT_INT, 0) // 0 means no specific default; ceph will use size-size/2 OPTION(osd_pool_default_pg_num, OPT_INT, 8) // number of PGs for new pools. Configure in global or mon section of ceph.conf diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc index d47cc6b430196..0a1ad6bc8cd14 100644 --- a/src/osd/OSDMap.cc +++ b/src/osd/OSDMap.cc @@ -1883,15 +1883,15 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid, if (pgp_bits > pg_bits) pgp_bits = pg_bits; - // crush map - map rulesets; - rulesets[CEPH_DATA_RULE] = "data"; - rulesets[CEPH_METADATA_RULE] = "metadata"; - rulesets[CEPH_RBD_RULE] = "rbd"; + vector pool_names; + pool_names.push_back("data"); + pool_names.push_back("metadata"); + pool_names.push_back("rbd"); int poolbase = get_max_osd() ? 
get_max_osd() : 1; - for (map::iterator p = rulesets.begin(); p != rulesets.end(); ++p) { + for (vector::iterator p = pool_names.begin(); + p != pool_names.end(); ++p) { int64_t pool = ++pool_max; pools[pool].type = pg_pool_t::TYPE_REPLICATED; pools[pool].flags = cct->_conf->osd_pool_default_flags; @@ -1899,21 +1899,21 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid, pools[pool].flags |= pg_pool_t::FLAG_HASHPSPOOL; pools[pool].size = cct->_conf->osd_pool_default_size; pools[pool].min_size = cct->_conf->get_osd_pool_default_min_size(); - pools[pool].crush_ruleset = p->first; + pools[pool].crush_ruleset = cct->_conf->osd_pool_default_crush_rule; pools[pool].object_hash = CEPH_STR_HASH_RJENKINS; pools[pool].set_pg_num(poolbase << pg_bits); pools[pool].set_pgp_num(poolbase << pgp_bits); pools[pool].last_change = epoch; - if (p->first == CEPH_DATA_RULE) + if (*p == "data") pools[pool].crash_replay_interval = cct->_conf->osd_default_data_pool_replay_window; - pool_name[pool] = p->second; - name_pool[p->second] = pool; + pool_name[pool] = *p; + name_pool[*p] = pool; } if (nosd >= 0) - build_simple_crush_map(cct, *crush, rulesets, nosd); + build_simple_crush_map(cct, *crush, nosd); else - build_simple_crush_map_from_conf(cct, *crush, rulesets); + build_simple_crush_map_from_conf(cct, *crush); for (int i=0; i& rulesets, int nosd) + int nosd) { const md_config_t *conf = cct->_conf; @@ -1969,9 +1969,10 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush, int minrep = conf->osd_min_rep; int maxrep = conf->osd_max_rep; assert(maxrep >= minrep); - for (map::iterator p = rulesets.begin(); p != rulesets.end(); ++p) { - int ruleset = p->first; - crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REPLICATED, minrep, maxrep); + { + crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule, + pg_pool_t::TYPE_REPLICATED, + minrep, maxrep); assert(rule); crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 
0); crush_rule_set_step(rule, 1, @@ -1980,14 +1981,12 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush, cct->_conf->osd_crush_chooseleaf_type); crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0); int rno = crush_add_rule(crush.crush, rule, -1); - crush.set_rule_name(rno, p->second); + crush.set_rule_name(rno, "replicated_rule"); } - crush.finalize(); } -void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush, - map& rulesets) +void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush) { const md_config_t *conf = cct->_conf; @@ -2053,9 +2052,10 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr // rules int minrep = conf->osd_min_rep; int maxrep = conf->osd_max_rep; - for (map::iterator p = rulesets.begin(); p != rulesets.end(); ++p) { - int ruleset = p->first; - crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REPLICATED, minrep, maxrep); + { + crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule, + pg_pool_t::TYPE_REPLICATED, + minrep, maxrep); assert(rule); crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0); @@ -2071,9 +2071,8 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr } crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0); int rno = crush_add_rule(crush.crush, rule, -1); - crush.set_rule_name(rno, p->second); + crush.set_rule_name(rno, "replicated_rule"); } - crush.finalize(); } diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h index 20bb77f35edab..87e8e68f49df3 100644 --- a/src/osd/OSDMap.h +++ b/src/osd/OSDMap.h @@ -617,9 +617,9 @@ public: int num_osd, int pg_bits, int pgp_bits); static int _build_crush_types(CrushWrapper& crush); static void build_simple_crush_map(CephContext *cct, CrushWrapper& crush, - map& poolsets, int num_osd); - static void build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush, - map& rulesets); + int num_osd); + static 
void build_simple_crush_map_from_conf(CephContext *cct, + CrushWrapper& crush); bool crush_ruleset_in_use(int ruleset) const; diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h index 7ca9fecbf3d82..96dbced277a87 100644 --- a/src/osd/osd_types.h +++ b/src/osd/osd_types.h @@ -216,13 +216,6 @@ enum { #define CEPH_CAS_NS 3 #define CEPH_OSDMETADATA_NS 0xff -// poolsets -enum { - CEPH_DATA_RULE, - CEPH_METADATA_RULE, - CEPH_RBD_RULE, -}; - #define OSD_SUPERBLOCK_POBJECT hobject_t(sobject_t(object_t("osd_superblock"), 0)) // placement seed (a hash value) diff --git a/src/test/cli/osdmaptool/clobber.t b/src/test/cli/osdmaptool/clobber.t index 9d8cfef04c60b..b9986d3386891 100644 --- a/src/test/cli/osdmaptool/clobber.t +++ b/src/test/cli/osdmaptool/clobber.t @@ -21,8 +21,8 @@ flags pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool crash_replay_interval 45 - pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool - pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool + pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool + pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool max_osd 3 @@ -44,8 +44,8 @@ flags pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool crash_replay_interval 45 - pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool - pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 
owner 0 flags hashpspool + pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool + pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool max_osd 1 diff --git a/src/test/cli/osdmaptool/create-print.t b/src/test/cli/osdmaptool/create-print.t index c10e3d182eff9..80761d9c371a6 100644 --- a/src/test/cli/osdmaptool/create-print.t +++ b/src/test/cli/osdmaptool/create-print.t @@ -11,8 +11,8 @@ flags pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool crash_replay_interval 45 - pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool - pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool + pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool + pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool max_osd 3 diff --git a/src/test/cli/osdmaptool/create-racks.t b/src/test/cli/osdmaptool/create-racks.t index f686ef4c05131..e8fe4d0750244 100644 --- a/src/test/cli/osdmaptool/create-racks.t +++ b/src/test/cli/osdmaptool/create-racks.t @@ -1,6 +1,22 @@ - $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks > /dev/null + $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks osdmaptool: osdmap file 'om' + osdmaptool: writing epoch 1 to om $ osdmaptool --test-map-pg 0.0 om osdmaptool: osdmap file 'om' parsed '0.0' -> 0.0 0.0 raw [] up [] acting [] + $ osdmaptool --print om + osdmaptool: osdmap file 'om' + epoch 1 + fsid 
[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re) + created \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re) + modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re) + flags + + pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool crash_replay_interval 45 + pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool + pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool + + max_osd 239 + + $ rm -f om -- 2.39.5