From: Sage Weil
Date: Wed, 25 Apr 2012 22:59:24 +0000 (-0700)
Subject: osdmap: use shared_ptr for CrushWrapper
X-Git-Tag: v0.47~87^2~12
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=98b1d8f36cde3ea9ac73f1ef532bb45d99dfd538;p=ceph.git

osdmap: use shared_ptr for CrushWrapper

Signed-off-by: Sage Weil
---

diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index d49082f829c5..08e5e0d88e35 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -1497,7 +1497,7 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
     ss << "got osdmap epoch " << p->get_epoch();
     r = 0;
   } else if (cmd == "getcrushmap") {
-    p->crush.encode(rdata);
+    p->crush->encode(rdata);
     ss << "got crush map from osdmap epoch " << p->get_epoch();
     r = 0;
   }
@@ -1752,7 +1752,7 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
       if (pending_inc.crush.length())
 	bl = pending_inc.crush;
       else
-	osdmap.crush.encode(bl);
+	osdmap.crush->encode(bl);
 
       CrushWrapper newcrush;
       bufferlist::iterator p = bl.begin();
@@ -1783,7 +1783,7 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
      if (pending_inc.crush.length())
 	bl = pending_inc.crush;
      else
-	osdmap.crush.encode(bl);
+	osdmap.crush->encode(bl);
 
      CrushWrapper newcrush;
      bufferlist::iterator p = bl.begin();
@@ -1812,7 +1812,7 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
      if (pending_inc.crush.length())
 	bl = pending_inc.crush;
      else
-	osdmap.crush.encode(bl);
+	osdmap.crush->encode(bl);
 
      CrushWrapper newcrush;
      bufferlist::iterator p = bl.begin();
@@ -1839,10 +1839,10 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
    } else if (m->cmd[1] == "setmaxosd" && m->cmd.size() > 2) {
      int newmax = atoi(m->cmd[2].c_str());
-      if (newmax < osdmap.crush.get_max_devices()) {
+      if (newmax < osdmap.crush->get_max_devices()) {
	err = -ERANGE;
	ss << "cannot set max_osd to " << newmax << " which is < crush max_devices "
-	   << osdmap.crush.get_max_devices();
+	   << osdmap.crush->get_max_devices();
	goto out;
      }
@@ -2301,7 +2301,7 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
	  return true;
	}
      } else if (m->cmd[4] == "crush_ruleset") {
-	if (osdmap.crush.rule_exists(n)) {
+	if (osdmap.crush->rule_exists(n)) {
	  if (pending_inc.new_pools.count(pool) == 0)
	    pending_inc.new_pools[pool] = *p;
	  pending_inc.new_pools[pool].crush_ruleset = n;
diff --git a/src/mon/PGMonitor.cc b/src/mon/PGMonitor.cc
index 2778f883aba1..c142cda519e0 100644
--- a/src/mon/PGMonitor.cc
+++ b/src/mon/PGMonitor.cc
@@ -676,7 +676,7 @@ bool PGMonitor::register_new_pgs()
     int64_t poolid = p->first;
     pg_pool_t &pool = p->second;
     int ruleno = pool.get_crush_ruleset();
-    if (!osdmap->crush.rule_exists(ruleno))
+    if (!osdmap->crush->rule_exists(ruleno))
       continue;
 
     if (pool.get_last_change() <= pg_map.last_pg_scan ||
@@ -712,7 +712,7 @@ bool PGMonitor::register_new_pgs()
     }
   }
 
-  int max = MIN(osdmap->get_max_osd(), osdmap->crush.get_max_devices());
+  int max = MIN(osdmap->get_max_osd(), osdmap->crush->get_max_devices());
   int removed = 0;
   for (set<pg_t>::iterator p = pg_map.creating_pgs.begin();
       p != pg_map.creating_pgs.end();
@@ -757,7 +757,7 @@ void PGMonitor::send_pg_creates()
   utime_t now = ceph_clock_now(g_ceph_context);
   OSDMap *osdmap = &mon->osdmon()->osdmap;
-  int max = MIN(osdmap->get_max_osd(), osdmap->crush.get_max_devices());
+  int max = MIN(osdmap->get_max_osd(), osdmap->crush->get_max_devices());
 
   for (set<pg_t>::iterator p = pg_map.creating_pgs.begin();
       p != pg_map.creating_pgs.end();
diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc
index 296ea65f9f30..f060b29fd4d9 100644
--- a/src/osd/OSDMap.cc
+++ b/src/osd/OSDMap.cc
@@ -725,7 +725,8 @@ int OSDMap::apply_incremental(Incremental &inc)
   // do new crush map last (after up/down stuff)
   if (inc.crush.length()) {
     bufferlist::iterator blp = inc.crush.begin();
-    crush.decode(blp);
+    crush.reset(new CrushWrapper);
+    crush->decode(blp);
   }
 
   calc_num_osds();
@@ -772,15 +773,15 @@ int OSDMap::_pg_to_osds(const pg_pool_t& pool, pg_t pg, vector<int>& osds) const
   unsigned size = pool.get_size();
   {
     int preferred = pg.preferred();
-    if (preferred >= max_osd || preferred >= crush.get_max_devices())
+    if (preferred >= max_osd || preferred >= crush->get_max_devices())
      preferred = -1;
 
-    assert(get_max_osd() >= crush.get_max_devices());
+    assert(get_max_osd() >= crush->get_max_devices());
 
    // what crush rule?
-    int ruleno = crush.find_rule(pool.get_crush_ruleset(), pool.get_type(), size);
+    int ruleno = crush->find_rule(pool.get_crush_ruleset(), pool.get_type(), size);
    if (ruleno >= 0)
-      crush.do_rule(ruleno, pps, osds, size, preferred, osd_weight);
+      crush->do_rule(ruleno, pps, osds, size, preferred, osd_weight);
   }
 
   return osds.size();
@@ -938,7 +939,7 @@ void OSDMap::encode_client_old(bufferlist& bl) const
 
   // crush
   bufferlist cbl;
-  crush.encode(cbl);
+  crush->encode(cbl);
   ::encode(cbl, bl);
 }
@@ -973,7 +974,7 @@ void OSDMap::encode(bufferlist& bl, uint64_t features) const
 
   // crush
   bufferlist cbl;
-  crush.encode(cbl);
+  crush->encode(cbl);
   ::encode(cbl, bl);
 
   // extended
@@ -1062,7 +1063,7 @@ void OSDMap::decode(bufferlist::iterator& p)
   bufferlist cbl;
   ::decode(cbl, p);
   bufferlist::iterator cblp = cbl.begin();
-  crush.decode(cblp);
+  crush->decode(cblp);
 
   // extended
   __u16 ev = 0;
@@ -1298,10 +1299,10 @@ void OSDMap::print_tree(ostream& out) const
   out << "# id\tweight\ttype name\tup/down\treweight\n";
   set<int> touched;
   set<int> roots;
-  crush.find_roots(roots);
+  crush->find_roots(roots);
   for (set<int>::iterator p = roots.begin(); p != roots.end(); p++) {
     list<qi> q;
-    q.push_back(qi(*p, 0, crush.get_bucket_weight(*p) / (float)0x10000));
+    q.push_back(qi(*p, 0, crush->get_bucket_weight(*p) / (float)0x10000));
     while (!q.empty()) {
       int cur = q.front().item;
       int depth = q.front().depth;
@@ -1319,14 +1320,14 @@ void OSDMap::print_tree(ostream& out) const
	continue;
      }
 
-      int type = crush.get_bucket_type(cur);
-      out << crush.get_type_name(type) << " " << crush.get_item_name(cur) << "\n";
+      int type = crush->get_bucket_type(cur);
+      out << crush->get_type_name(type) << " " << crush->get_item_name(cur) << "\n";
 
      // queue bucket contents...
-      int s = crush.get_bucket_size(cur);
+      int s = crush->get_bucket_size(cur);
      for (int k=s-1; k>=0; k--)
-	q.push_front(qi(crush.get_bucket_item(cur, k), depth+1,
-			(float)crush.get_bucket_item_weight(cur, k) / (float)0x10000));
+	q.push_front(qi(crush->get_bucket_item(cur, k), depth+1,
+			(float)crush->get_bucket_item_weight(cur, k) / (float)0x10000));
     }
   }
@@ -1398,7 +1399,7 @@ void OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
     pool_name[pool] = p->second;
   }
 
-  build_simple_crush_map(cct, crush, rulesets, nosd);
+  build_simple_crush_map(cct, *crush, rulesets, nosd);
 
   for (int i=0; i<nosd; i++) {
@@ ... @@
     pool_name[pool] = p->second;
   }
 
-  build_simple_crush_map_from_conf(cct, crush, rulesets);
+  build_simple_crush_map_from_conf(cct, *crush, rulesets);
 
   for (int i=0; i<=maxosd; i++) {
     set_state(i, 0);
diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h
index edac29f055a4..9580ea8f5272 100644
--- a/src/osd/OSDMap.h
+++ b/src/osd/OSDMap.h
@@ -192,7 +192,7 @@ private:
   string cluster_snapshot;
 
 public:
-  CrushWrapper crush;       // hierarchical map
+  std::tr1::shared_ptr<CrushWrapper> crush;       // hierarchical map
 
   friend class OSDMonitor;
   friend class PGMonitor;
@@ -204,7 +204,8 @@ private:
	       flags(0),
	       num_osd(0), max_osd(0),
	       osd_addrs(new addrs_s),
-	       cluster_snapshot_epoch(0) {
+	       cluster_snapshot_epoch(0),
+	       crush(new CrushWrapper) {
     memset(&fsid, 0, sizeof(fsid));
   }
diff --git a/src/osdmaptool.cc b/src/osdmaptool.cc
index 3fe61d73974d..5a816eb3a597 100644
--- a/src/osdmaptool.cc
+++ b/src/osdmaptool.cc
@@ -232,7 +232,7 @@ int main(int argc, const char **argv)
 
   if (!export_crush.empty()) {
     bufferlist cbl;
-    osdmap.crush.encode(cbl);
+    osdmap.crush->encode(cbl);
     r = cbl.write_file(export_crush.c_str());
     if (r < 0) {
       cerr << me << ": error writing crush map to " << import_crush << std::endl;
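
The interesting part of this change is in OSDMap::apply_incremental(): rather than
decoding the incoming CRUSH map into the existing CrushWrapper in place, the code now
installs a fresh object with crush.reset(new CrushWrapper) and decodes into that. Any
other holder of the old shared_ptr keeps an unchanged, consistent view of the previous
CRUSH map while the OSDMap moves on to the new one. Below is a minimal standalone
sketch of that pointer-swap pattern; it is not Ceph code (Crush and Map are
hypothetical stand-ins for CrushWrapper and OSDMap), and it uses std::shared_ptr where
the 2012 tree used std::tr1::shared_ptr:

// Sketch only: illustrates the copy-by-pointer, replace-by-reset pattern
// this commit adopts. "Crush" stands in for CrushWrapper, "Map" for OSDMap.
#include <cassert>
#include <memory>

struct Crush {
  int max_devices;
  explicit Crush(int n) : max_devices(n) {}
};

struct Map {
  std::shared_ptr<Crush> crush;
  Map() : crush(new Crush(0)) {}

  // Analogous to OSDMap::apply_incremental(): install a fresh object
  // instead of mutating the one that other Map copies may share.
  void apply_new_crush(int new_max) {
    crush.reset(new Crush(new_max));
  }
};

int main() {
  Map a;
  a.apply_new_crush(10);

  Map b = a;                   // copying the Map shares one Crush object
  assert(a.crush == b.crush);

  a.apply_new_crush(20);       // a swaps in a new Crush...
  assert(b.crush->max_devices == 10);  // ...b still sees the old one
  assert(a.crush->max_devices == 20);
  return 0;
}

Note that the monitor paths in the diff never mutate the shared object directly:
prepare_command() round-trips the current map through a bufferlist into a local
CrushWrapper newcrush, so edits stay private until committed, which composes cleanly
with the sharing shown above.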