else:
raise AssertionError("MDS daemon '{0}' did not crash as expected".format(daemon_id))
- def assert_cluster_log(self, expected_pattern):
+ def assert_cluster_log(self, expected_pattern, invert_match=False):
"""
Context manager. Assert that during execution, or up to 5 seconds later,
the Ceph cluster log emits a message matching the expected pattern.
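+ If invert_match is true, assert instead that no matching message appears.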
class ContextManager(object):
def match(self):
- return expected_pattern in self.watcher_process.stdout.getvalue()
+ found = expected_pattern in self.watcher_process.stdout.getvalue()
+ if invert_match:
+ return not found
+
+ return found
def __enter__(self):
self.watcher_process = ceph_manager.run_ceph_w()
for ss in summary_strings:
if pattern in ss:
return True
-
+
log.debug("Not found expected summary strings yet ({0})".format(summary_strings))
return False
osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
return pg_warn_min_per_osd * osd_count
+ def get_all_mds_info(self):
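+ """ Return info for every MDS daemon (alias for get_mds_info()). """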
+ return self.get_mds_info()
+
def create(self):
log.info("Creating filesystem '{0}'".format(self.name))
"""
return self.get_daemon_names("up:active")
+ def get_all_mds_rank(self):
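+ """
+ Return the ranks currently held by MDS daemons, in ascending order,
+ skipping unassigned (-1) and standby-replay daemons.
+ """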
+ status = self.get_mds_map()
+ result = []
+ for mds_status in sorted(status['info'].values(), key=lambda a: a['rank']):
+ if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
+ result.append(mds_status['rank'])
+
+ return result
+
def get_rank_names(self):
"""
Return MDS daemon names of those daemons holding a rank,
self.delete_all_filesystems()
self.create()
+ def put_metadata_object_raw(self, object_id, infile):
+ """
+ Write an object into the metadata pool from a file on the client host.
+ """
+ self.client_remote.run(args=[
+ 'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'put', object_id, infile
+ ])
+
+ def get_metadata_object_raw(self, object_id):
+ """
+ Retrieve an object from the metadata pool, write it to a temporary
+ file on the client host and return that file's path.
+ """
+ temp_bin_path = '/tmp/' + object_id + '.bin'
+
+ self.client_remote.run(args=[
+ 'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
+ ])
+
+ return temp_bin_path
+
def get_metadata_object(self, object_type, object_id):
"""
Retrieve an object from the metadata pool, pass it through
self.fs.wait_for_daemons()
self.mount_a.mount()
self._validate_linkage(inos)
+
+ def _stash_inotable(self):
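+ """
+ Fetch each active rank's inotable object from the metadata pool and
+ return a dict mapping object name to the path of the saved copy.
+ """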
+ # Get all active ranks
+ ranks = self.fs.get_all_mds_rank()
+
+ inotable_dict = {}
+ for rank in ranks:
+ inotable_oid = "mds{rank:d}_inotable".format(rank=rank)
+ log.info("Trying to fetch inotable object: " + inotable_oid)
+
+ inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
+ inotable_dict[inotable_oid] = inotable_raw
+ return inotable_dict
+
+ def test_inotable_sync(self):
+ self.mount_a.write_n_mb("file1_sixmegs", 6)
+
+ # Flush journal
+ self.mount_a.umount_wait()
+ self.fs.mds_asok(["flush", "journal"])
+
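+ # Stash the on-disk inotable as it stands now, before file2/file3 exist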
+ inotable_copy = self._stash_inotable()
+
+ self.mount_a.mount()
+
+ self.mount_a.write_n_mb("file2_sixmegs", 6)
+ self.mount_a.write_n_mb("file3_sixmegs", 6)
+
+ inos = self._get_paths_to_ino()
+
+ # Flush journal
+ self.mount_a.umount_wait()
+ self.fs.mds_asok(["flush", "journal"])
+
+ self.mount_a.umount_wait()
+
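+ # The filesystem is consistent here, so a repair scrub should not touch the inode table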
+ with self.assert_cluster_log("inode table repaired", invert_match=True):
+ self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+
+ self.mds_cluster.mds_stop()
+ self.mds_cluster.mds_fail()
+
+ # Splice the new files' events out of the journal so that, after
+ # restart, the on-disk inotable is all the MDS will have in memory
+
+ self.fs.journal_tool(["event", "splice",
+ "--inode={0}".format(inos["./file2_sixmegs"]), "summary"])
+
+ self.fs.journal_tool(["event", "splice",
+ "--inode={0}".format(inos["./file3_sixmegs"]), "summary"])
+
+ # Revert to old inotable.
+ for key, value in inotable_copy.iteritems():
+ self.fs.put_metadata_object_raw(key, value)
+
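+ # Restart on the stale inotable; the next repair scrub should detect and fix it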
+ self.mds_cluster.mds_restart()
+ self.fs.wait_for_daemons()
+
+ with self.assert_cluster_log("inode table repaired"):
+ self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+
+ self.mds_cluster.mds_stop()
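+ # The repaired inotable's first free range should now begin above file3's ino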
+ table_text = self.fs.table_tool(["0", "show", "inode"])
+ table = json.loads(table_text)
+ self.assertGreater(
+ table['0']['data']['inotable']['free'][0]['start'],
+ inos['./file3_sixmegs'])