for info in self.get_standbys():
yield info
for fs in self.map['filesystems']:
- for info in fs['mdsmap']['info']:
+ for info in fs['mdsmap']['info'].values():
yield info
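# A minimal sketch (illustration only, not part of the patch) of why the
# .values() calls above are needed: 'info' is now a dict keyed by
# "gid_<gid>" rather than a list, and iterating a dict yields its keys,
# not the per-daemon records. The gid and name below are made-up values.
example_info = {"gid_4107": {"name": "a", "state": "up:active"}}
assert list(example_info) == ["gid_4107"]                   # keys only
assert [i["name"] for i in example_info.values()] == ["a"]  # full records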
def get_standbys(self):
Get the standby:replay MDS for the given FSCID.
"""
fs = self.get_fsmap(fscid)
- for info in fs['mdsmap']['info']:
+ for info in fs['mdsmap']['info'].values():
if info['state'] == 'up:standby-replay':
yield info
Get the ranks for the given FSCID.
"""
fs = self.get_fsmap(fscid)
- for info in fs['mdsmap']['info']:
+ for info in fs['mdsmap']['info'].values():
if info['rank'] >= 0:
yield info
return info
raise RuntimeError("FSCID {0} has no rank {1}".format(fscid, rank))
- def get_cluster(self, fscid):
- """
- Get the MDS cluster for the given FSCID.
- """
- fs = self.get_fsmap(fscid)
- for info in fs['mdsmap']['info']:
- yield info
-
def get_mds(self, name):
"""
Get the info for the given MDS name.
mdsmap = fs['mdsmap']
metadata_pool = pool_id_name[mdsmap['metadata_pool']]
- for info in status.get_ranks(fs['id']):
- self.mon_manager.raw_cluster_cmd('mds', 'fail', str(info['gid']))
+ for gid in mdsmap['up'].values():
+ self.mon_manager.raw_cluster_cmd('mds', 'fail', str(gid))
self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
- for info in mds_map['info']:
- if info['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
- log.warning("Unhealthy mds state {0}:{1}".format(info['gid'], info['state']))
+ for mds_id, mds_status in mds_map['info'].items():
+ if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
+ log.warning("Unhealthy mds state {0}:{1}".format(mds_id, mds_status['state']))
return False
- elif info['state'] == 'up:active':
+ elif mds_status['state'] == 'up:active':
active_count += 1
log.info("are_daemons_healthy: {0}/{1}".format(
if active_count >= mds_map['max_mds']:
# The MDSMap says these guys are active, but let's check they really are
- for info in mds_map['info']:
- if info['state'] == 'up:active':
+ for mds_id, mds_status in mds_map['info'].items():
+ if mds_status['state'] == 'up:active':
try:
- daemon_status = self.mds_asok(["status"], mds_id=info['name'])
+ daemon_status = self.mds_asok(["status"], mds_id=mds_status['name'])
except CommandFailedError as cfe:
if cfe.exitstatus == errno.EINVAL:
# Old version, can't do this check
"""
status = self.get_mds_map()
result = []
- for mds_status in sorted(status['info'], lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(status['info'].values(), key=lambda a: a['rank']):
if mds_status['state'] == state or state is None:
result.append(mds_status['name'])
def get_all_mds_rank(self):
status = self.get_mds_map()
result = []
- for mds_status in sorted(status['info'], lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(status['info'].values(), key=lambda a: a['rank']):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['rank'])
"""
status = self.get_mds_map()
result = []
- for mds_status in sorted(status['info'], lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(status['info'].values(), key=lambda a: a['rank']):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['name'])
for (set<mds_rank_t>::const_iterator p = in.begin(); p != in.end(); ++p)
f->dump_int("mds", *p);
f->close_section();
- f->open_array_section("up");
+ f->open_object_section("up");
for (map<mds_rank_t,mds_gid_t>::const_iterator p = up.begin(); p != up.end(); ++p) {
- f->dump_int("mds", p->first);
+ char s[16]; // "mds_" + up to 11 chars for a signed 32-bit rank + '\0'
+ snprintf(s, sizeof(s), "mds_%d", int(p->first));
+ f->dump_int(s, p->second);
}
f->close_section();
f->open_array_section("failed");
for (set<mds_rank_t>::const_iterator p = stopped.begin(); p != stopped.end(); ++p)
f->dump_int("mds", *p);
f->close_section();
- f->open_array_section("info");
+ f->open_object_section("info");
for (map<mds_gid_t,mds_info_t>::const_iterator p = mds_info.begin(); p != mds_info.end(); ++p) {
- f->open_object_section("info_item");
+ char s[25]; // "gid_" + up to 20 digits for a 64-bit gid + '\0'
+ snprintf(s, sizeof(s), "gid_%llu", (long long unsigned)p->first);
+ f->open_object_section(s);
p->second.dump(f);
f->close_section();
}
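# A minimal sketch (illustration only, not part of the patch) of the JSON
# shape produced by the dump code above: "up" and "info" are now objects
# keyed by "mds_<rank>" and "gid_<gid>" instead of arrays. The rank and
# gid values are made-up examples.
import json
example = json.loads("""
{
  "up": {"mds_0": 4107},
  "info": {
    "gid_4107": {"gid": 4107, "name": "a", "rank": 0, "state": "up:active"}
  }
}
""")
gid = example["up"]["mds_0"]                   # gid holding rank 0
info = example["info"]["gid_{0}".format(gid)]  # full record for that gid
assert info["state"] == "up:active"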
super(RankEvicter, self).__init__()
- def _get_info(self, key, value):
- for info in self._mds_map["info"]:
- if info[key] == value:
- return info
- return {}
-
def _ready_to_evict(self):
- info = self._get_info('rank', self.rank)
- if (self.rank not in self._mds_map['up']) or info.get('gid', None) != self.gid:
+ if self._mds_map['up'].get("mds_{0}".format(self.rank), None) != self.gid:
log.info("Evicting {0} from {1}/{2}: rank no longer associated with gid, done.".format(
self._client_spec, self.rank, self.gid
))
raise RankEvicter.GidGone()
- info = self._get_info('gid', self.gid)
+ info = self._mds_map['info']["gid_{0}".format(self.gid)]
log.debug("_ready_to_evict: state={0}".format(info['state']))
return info['state'] in ["up:active", "up:clientreplay"]
mds_map = self.get_mds_map()
up = {}
- for rank in mds_map['up']:
- up[rank] = self._get_info('rank', rank)['gid']
+ for name, gid in mds_map['up'].items():
+ # Quirk of the MDSMap JSON dump: keys in the up dict are like "mds_0"
+ assert name.startswith("mds_")
+ up[int(name[4:])] = gid
# For all MDS ranks held by a daemon
# Do the parallelism in python instead of using "tell mds.*", because