osd: use CrushWrapper::add_simple_ruleset
authorLoic Dachary <loic@dachary.org>
Thu, 26 Dec 2013 10:20:41 +0000 (11:20 +0100)
committerLoic Dachary <loic@dachary.org>
Thu, 26 Dec 2013 23:13:47 +0000 (00:13 +0100)
Replace the manually crafted ruleset in OSDMap::build_simple_crush_map*
with calls to add_simple_ruleset. The generated rulesets do not have the
same behavior, but that presumably does not cause any backward
compatibility problem because they are only created when a new cluster
is being initialized.

The prototypes of OSDMap::build_simple* are modified to allow for a
return code and the display of a human-readable error message.
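
For illustration, a minimal sketch of the new call path, mirroring the
patched build_simple_crush_map below (the local stringstream and the
&ss argument are assumptions about a self-contained caller; in the
patch the stream pointer is passed in as a parameter):

    stringstream ss;
    int r = crush.add_simple_ruleset("replicated_ruleset", "default", "host",
                                     "firstn", pg_pool_t::TYPE_REPLICATED, &ss);
    if (r < 0) {
      // error text accumulated by add_simple_ruleset is surfaced here
      lderr(cct) << ss.str() << dendl;
      return r;
    }
    crush.finalize();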

The --osd-min-rep and --osd-max-rep configuration options are removed:
they were only used in the code that was removed.

Signed-off-by: Loic Dachary <loic@dachary.org>
doc/rados/configuration/pool-pg-config-ref.rst
src/common/config_opts.h
src/osd/OSDMap.cc
src/osd/OSDMap.h

index 82d9185205ecba026f49a7d4b5aa65780ff0fe5c..8c6632764a453c705911dbae4fd04122b76423f3 100644 (file)
@@ -66,20 +66,6 @@ Ceph configuration file.
 :Default: ``1``. Typically a host containing one or more Ceph OSD Daemons.
 
 
-``osd min rep``
-
-:Description: The minimum number of replicas for a ruleset.
-:Type: 32-bit Integer
-:Default: ``1``
-
-
-``osd max rep``
-
-:Description: The maximum number of replicas for a ruleset.
-:Type: 32-bit Integer
-:Default: ``10``
-
-
 ``osd pool default crush rule`` 
 
 :Description: The default CRUSH ruleset to use when creating a pool.
index d2296b41f34cbee71ff7a4ff8f2c1b18ff57b9bf..57b1b26b410e1d1180f4cb58dbabcef285740620 100644 (file)
@@ -393,8 +393,6 @@ OPTION(osd_client_message_cap, OPT_U64, 100)              // num client messages
 OPTION(osd_pg_bits, OPT_INT, 6)  // bits per osd
 OPTION(osd_pgp_bits, OPT_INT, 6)  // bits per osd
 OPTION(osd_crush_chooseleaf_type, OPT_INT, 1) // 1 = host
-OPTION(osd_min_rep, OPT_INT, 1)
-OPTION(osd_max_rep, OPT_INT, 10)
 OPTION(osd_pool_default_crush_rule, OPT_INT, CEPH_DEFAULT_CRUSH_REPLICATED_RULESET)
 OPTION(osd_pool_default_size, OPT_INT, 3)
 OPTION(osd_pool_default_min_size, OPT_INT, 0)  // 0 means no specific default; ceph will use size-size/2
index 0a1ad6bc8cd14fe17b246d5d5939975543c5ae8d..4a02ab4d36b5513a19d23d96ead03e66238d98eb 100644 (file)
@@ -1910,17 +1910,22 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
     name_pool[*p] = pool;
   }
 
+  stringstream ss;
+  int r;
   if (nosd >= 0)
-    build_simple_crush_map(cct, *crush, nosd);
+    r = build_simple_crush_map(cct, *crush, nosd, &ss);
   else
-    build_simple_crush_map_from_conf(cct, *crush);
+    r = build_simple_crush_map_from_conf(cct, *crush, &ss);
 
+  if (r < 0)
+    lderr(cct) << ss.str() << dendl;
+  
   for (int i=0; i<get_max_osd(); i++) {
     set_state(i, 0);
     set_weight(i, CEPH_OSD_OUT);
   }
 
-  return 0;
+  return r;
 }
 
 int OSDMap::_build_crush_types(CrushWrapper& crush)
@@ -1939,11 +1944,9 @@ int OSDMap::_build_crush_types(CrushWrapper& crush)
   return 10;
 }
 
-void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
-                                   int nosd)
+int OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
+                                  int nosd, stringstream *ss)
 {
-  const md_config_t *conf = cct->_conf;
-
   crush.create();
 
   // root
@@ -1965,28 +1968,19 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
     crush.insert_item(cct, o, 1.0, name, loc);
   }
 
-  // rules
-  int minrep = conf->osd_min_rep;
-  int maxrep = conf->osd_max_rep;
-  assert(maxrep >= minrep);
-  {
-    crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule,
-                                      pg_pool_t::TYPE_REPLICATED,
-                                      minrep, maxrep);
-    assert(rule);
-    crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0);
-    crush_rule_set_step(rule, 1,
-                       cct->_conf->osd_crush_chooseleaf_type ? CRUSH_RULE_CHOOSELEAF_FIRSTN : CRUSH_RULE_CHOOSE_FIRSTN,
-                       CRUSH_CHOOSE_N,
-                       cct->_conf->osd_crush_chooseleaf_type);
-    crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
-    int rno = crush_add_rule(crush.crush, rule, -1);
-    crush.set_rule_name(rno, "replicated_rule");
-  }
+  r = crush.add_simple_ruleset("replicated_ruleset", "default", "host",
+                              "firstn", pg_pool_t::TYPE_REPLICATED, ss);
+  if (r < 0)
+    return r;
+
   crush.finalize();
+
+  return 0;
 }
 
-void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush)
+int OSDMap::build_simple_crush_map_from_conf(CephContext *cct,
+                                            CrushWrapper& crush,
+                                            stringstream *ss)
 {
   const md_config_t *conf = cct->_conf;
 
@@ -2049,31 +2043,14 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr
     crush.insert_item(cct, o, 1.0, *i, loc);
   }
 
-  // rules
-  int minrep = conf->osd_min_rep;
-  int maxrep = conf->osd_max_rep;
-  {
-    crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule,
-                                      pg_pool_t::TYPE_REPLICATED,
-                                      minrep, maxrep);
-    assert(rule);
-    crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0);
-
-    if (racks.size() > 3) {
-      // spread replicas across hosts
-      crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSELEAF_FIRSTN, CRUSH_CHOOSE_N, 2);
-    } else if (hosts.size() > 1) {
-      // spread replicas across hosts
-      crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSELEAF_FIRSTN, CRUSH_CHOOSE_N, 1);
-    } else {
-      // just spread across osds
-      crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSE_FIRSTN, CRUSH_CHOOSE_N, 0);
-    }
-    crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
-    int rno = crush_add_rule(crush.crush, rule, -1);
-    crush.set_rule_name(rno, "replicated_rule");
-  }
+  r = crush.add_simple_ruleset("replicated_ruleset", "default", "host",
+                              "firstn", pg_pool_t::TYPE_REPLICATED, ss);
+  if (r < 0)
+    return r;
+
   crush.finalize();
+
+  return 0;
 }
 
 
index 87e8e68f49df33e0de7065f931a1ce8524f7bba6..dae46450dca0c603f0b4a4828318ed0d21b93d08 100644 (file)
@@ -616,10 +616,11 @@ public:
   int build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
                   int num_osd, int pg_bits, int pgp_bits);
   static int _build_crush_types(CrushWrapper& crush);
-  static void build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
-                                    int num_osd);
-  static void build_simple_crush_map_from_conf(CephContext *cct,
-                                              CrushWrapper& crush);
+  static int build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
+                                   int num_osd, stringstream *ss);
+  static int build_simple_crush_map_from_conf(CephContext *cct,
+                                             CrushWrapper& crush,
+                                             stringstream *ss);
 
   bool crush_ruleset_in_use(int ruleset) const;