cephfs: Add an inotable repair case to the forward scrub test
author    Vishal Kanaujia <Vishal.Kanaujia@sandisk.com>
          Wed, 13 Jul 2016 14:18:49 +0000 (19:48 +0530)
committer John Spray <john.spray@redhat.com>
          Thu, 28 Jul 2016 10:31:55 +0000 (11:31 +0100)
tasks/cephfs/cephfs_test_case.py
tasks/cephfs/filesystem.py
tasks/cephfs/test_forward_scrub.py

tasks/cephfs/cephfs_test_case.py
index fb24fd6e22679113a5805246ea66a45067b60761..55529528ac0fce04ebd51fcd422a5cefafd42065 100644 (file)
@@ -348,7 +348,7 @@ class CephFSTestCase(unittest.TestCase):
         else:
             raise AssertionError("MDS daemon '{0}' did not crash as expected".format(daemon_id))
 
-    def assert_cluster_log(self, expected_pattern):
+    def assert_cluster_log(self, expected_pattern, invert_match=False):
         """
         Context manager.  Assert that during execution, or up to 5 seconds later,
         the Ceph cluster log emits a message matching the expected pattern.
@@ -360,7 +360,11 @@ class CephFSTestCase(unittest.TestCase):
 
         class ContextManager(object):
             def match(self):
-                return expected_pattern in self.watcher_process.stdout.getvalue()
+                found = expected_pattern in self.watcher_process.stdout.getvalue()
+                if invert_match:
+                    return not found
+
+                return found
 
             def __enter__(self):
                 self.watcher_process = ceph_manager.run_ceph_w()
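
The new invert_match flag turns the assertion around: the context manager now
fails if the pattern *does* appear in the cluster log within the window. A
minimal usage sketch, mirroring the call this commit adds in
test_forward_scrub.py:

    # Scrub a healthy tree and assert that no repair message is logged.
    with self.assert_cluster_log("inode table repaired", invert_match=True):
        self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])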
@@ -402,7 +406,7 @@ class CephFSTestCase(unittest.TestCase):
                 for ss in summary_strings:
                     if pattern in ss:
                          return True
-                
+
             log.debug("Not found expected summary strings yet ({0})".format(summary_strings))
             return False
 
tasks/cephfs/filesystem.py
index 577dcfacaa642f6328a9e90206a37a0e9dc5a6aa..946550a3e4680692b0d669311f8d19438bac3466 100644 (file)
@@ -283,6 +283,9 @@ class Filesystem(MDSCluster):
         osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
         return pg_warn_min_per_osd * osd_count
 
+    def get_all_mds_info(self):
+        # Convenience wrapper around get_mds_info().
+        return self.get_mds_info()
+
     def create(self):
         log.info("Creating filesystem '{0}'".format(self.name))
 
@@ -442,6 +445,15 @@ class Filesystem(MDSCluster):
         """
         return self.get_daemon_names("up:active")
 
+    def get_all_mds_rank(self):
+        """
+        Return the ranks held by active (non-standby-replay) MDS
+        daemons, in ascending order.
+        """
+        status = self.get_mds_map()
+        result = []
+        for mds_status in sorted(status['info'].values(), key=lambda info: info['rank']):
+            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
+                result.append(mds_status['rank'])
+
+        return result
+
     def get_rank_names(self):
         """
         Return MDS daemon names of those daemons holding a rank,
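
The new get_all_mds_rank helper above returns the active ranks in ascending
order, skipping standby-replay daemons. A hedged sketch of what a caller sees
(rank values illustrative):

    ranks = fs.get_all_mds_rank()   # e.g. [0] with one active MDS, [0, 1] with two
    for rank in ranks:
        inotable_oid = "mds{rank:d}_inotable".format(rank=rank)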
@@ -497,6 +509,28 @@ class Filesystem(MDSCluster):
         self.delete_all_filesystems()
         self.create()
 
+    def put_metadata_object_raw(self, object_id, infile):
+        """
+        Save an object to the metadata pool from a local file.
+        """
+        self.client_remote.run(args=[
+            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name,
+            'put', object_id, infile
+        ])
+
+    def get_metadata_object_raw(self, object_id):
+        """
+        Retrieve an object from the metadata pool, store it in a local
+        file, and return that file's path.
+        """
+        temp_bin_path = '/tmp/' + object_id + '.bin'
+
+        self.client_remote.run(args=[
+            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name,
+            'get', object_id, temp_bin_path
+        ])
+
+        return temp_bin_path
+
     def get_metadata_object(self, object_type, object_id):
         """
         Retrieve an object from the metadata pool, pass it through
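
Unlike get_metadata_object, the new *_raw helpers shell out to the rados CLI
and never decode the object, so a test can stash a byte-exact copy and write
it back later. A sketch of the round trip, using the inotable object name the
test below operates on:

    saved_path = fs.get_metadata_object_raw("mds0_inotable")  # -> /tmp/mds0_inotable.bin
    # ... mutate the filesystem ...
    fs.put_metadata_object_raw("mds0_inotable", saved_path)   # write the stale copy back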
tasks/cephfs/test_forward_scrub.py
index 6a179a67535627a5092c7bf25d156ce8a1868490..6df9b73e33cf1a82af975095bbd87cb313138e6a 100644 (file)
@@ -194,3 +194,71 @@ class TestForwardScrub(CephFSTestCase):
         self.fs.wait_for_daemons()
         self.mount_a.mount()
         self._validate_linkage(inos)
+
+    def _stash_inotable(self):
+        # Get all active ranks
+        ranks = self.fs.get_all_mds_rank()
+
+        # Map each rank's inotable object name to a local raw copy of it.
+        inotable_dict = {}
+        for rank in ranks:
+            inotable_oid = "mds{rank:d}_inotable".format(rank=rank)
+            log.info("Trying to fetch inotable object: " + inotable_oid)
+
+            inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
+            inotable_dict[inotable_oid] = inotable_raw
+        return inotable_dict
+
+    def test_inotable_sync(self):
+        self.mount_a.write_n_mb("file1_sixmegs", 6)
+
+        # Flush journal
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"])
+
+        inotable_copy = self._stash_inotable()
+
+        self.mount_a.mount()
+
+        self.mount_a.write_n_mb("file2_sixmegs", 6)
+        self.mount_a.write_n_mb("file3_sixmegs", 6)
+
+        inos = self._get_paths_to_ino()
+
+        # Flush journal
+        self.mount_a.umount_wait()
+        self.fs.mds_asok(["flush", "journal"])
+
+
+        with self.assert_cluster_log("inode table repaired", invert_match=True):
+            self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+
+        self.mds_cluster.mds_stop()
+        self.mds_cluster.mds_fail()
+
+        # Splice the file2/file3 creation events out of the journal, so
+        # that the on-disk inotable is all that will populate the
+        # in-memory InoTable when the MDS restarts.
+
+        self.fs.journal_tool(["event", "splice",
+            "--inode={0}".format(inos["./file2_sixmegs"]), "summary"])
+
+        self.fs.journal_tool(["event", "splice",
+            "--inode={0}".format(inos["./file3_sixmegs"]), "summary"])
+
+        # Revert to old inotable.
+        for key, value in inotable_copy.iteritems():
+            self.fs.put_metadata_object_raw(key, value)
+
+        self.mds_cluster.mds_restart()
+        self.fs.wait_for_daemons()
+
+        with self.assert_cluster_log("inode table repaired"):
+            self.fs.mds_asok(["scrub_path", "/", "repair", "recursive"])
+
+        self.mds_cluster.mds_stop()
+        table_text = self.fs.table_tool(["0", "show", "inode"])
+        table = json.loads(table_text)
+        self.assertGreater(
+                table['0']['data']['inotable']['free'][0]['start'],
+                inos['./file3_sixmegs'])
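
The final assertion decodes the inotable dumped by cephfs-table-tool and checks
that, after repair, rank 0's first free ino range begins above the highest ino
allocated for file3_sixmegs. The dereference above implies JSON of roughly this
shape, shown here as a self-contained sketch (all values hypothetical):

    # Illustrative structure only; 'start', 'len', and the ino are made up.
    table = {"0": {"data": {"inotable": {"free": [{"start": 1099511627792, "len": 512}]}}}}
    highest_ino_used = 1099511627791  # hypothetical ino of file3_sixmegs
    assert table["0"]["data"]["inotable"]["free"][0]["start"] > highest_ino_used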