test: make rank argument mandatory when running journal_tool 23187/head
author Venky Shankar <vshankar@redhat.com>
Tue, 4 Sep 2018 06:20:54 +0000 (02:20 -0400)
committer Venky Shankar <vshankar@redhat.com>
Fri, 21 Sep 2018 10:09:39 +0000 (06:09 -0400)
Also, fix a number of quirky journal_tool invocations that pass the
"--rank" argument as part of the command vector rather than passing it
as a function argument.

Fixes: https://tracker.ceph.com/issues/24780
Signed-off-by: Venky Shankar <vshankar@redhat.com>
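
At the call sites this means the rank becomes a mandatory positional
function argument instead of part of the command itself. A minimal
before/after sketch (illustrative only, not part of the commit):

    # before: rank was optional, sometimes smuggled into the command:
    #   self.fs.journal_tool(['--rank=' + self.fs.name + ':0',
    #                         'journal', 'reset'])
    # after: rank is mandatory; the helper builds "--rank <fs>:<rank>"
    self.fs.journal_tool(['journal', 'reset'], 0)
    self.fs.journal_tool(['event', 'recover_dentries', 'summary'], 1,
                         quiet=True)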
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_flush.py
qa/tasks/cephfs/test_forward_scrub.py
qa/tasks/cephfs/test_journal_migration.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_recovery_pool.py
qa/workunits/suites/cephfs_journal_tool_smoke.sh

index b2837c2eee82da6592557539052a5c6bc5e85111..8256e914cf03e5978cbce6e6c552c996e18d8846 100644 (file)
@@ -1220,6 +1220,9 @@ class Filesystem(MDSCluster):
         """
         return ""
 
+    def _make_rank(self, rank):
+        return "{}:{}".format(self.name, rank)
+
     def _run_tool(self, tool, args, rank=None, quiet=False):
         # Tests frequently have [client] configuration that jacks up
         # the objecter log level (unlikely to be interesting here)
@@ -1230,7 +1233,7 @@ class Filesystem(MDSCluster):
             base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']
 
         if rank is not None:
-            base_args.extend(["--rank", "%d" % rank])
+            base_args.extend(["--rank", "%s" % str(rank)])
 
         t1 = datetime.datetime.now()
         r = self.tool_remote.run(
@@ -1252,11 +1255,12 @@ class Filesystem(MDSCluster):
         mds_id = self.mds_ids[0]
         return self.mds_daemons[mds_id].remote
 
-    def journal_tool(self, args, rank=None, quiet=False):
+    def journal_tool(self, args, rank, quiet=False):
         """
-        Invoke cephfs-journal-tool with the passed arguments, and return its stdout
+        Invoke cephfs-journal-tool with the passed arguments for a rank, and return its stdout
         """
-        return self._run_tool("cephfs-journal-tool", args, rank, quiet)
+        fs_rank = self._make_rank(rank)
+        return self._run_tool("cephfs-journal-tool", args, fs_rank, quiet)
 
     def table_tool(self, args, quiet=False):
         """
index d56f39eed011fbbc278dc09b6352f838e688f244..459077b04282333297301f70a5991bb1af6b1c92 100644 (file)
@@ -482,7 +482,7 @@ class TestDamage(CephFSTestCase):
 
         # Drop everything from the MDS cache
         self.mds_cluster.mds_stop()
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
 
index 252354694c70c87f6794c96c223579704e2eed6d..0faeb43fc03f3df631105464ee84f2c3b7d86f2e 100644 (file)
@@ -362,9 +362,9 @@ class TestDataScan(CephFSTestCase):
         if False:
             with self.assertRaises(CommandFailedError):
                 # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
 
-        self.fs.journal_tool(["journal", "reset", "--force"])
+        self.fs.journal_tool(["journal", "reset", "--force"], 0)
         self.fs.data_scan(["init"])
         self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()], worker_count=workers)
         self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()], worker_count=workers)
index 1f84e4200a399253e33be79ba303e50196383b5a..ee0b1c92b1992e64bae7cfff36a31f205dad890a 100644 (file)
@@ -44,7 +44,7 @@ class TestFlush(CephFSTestCase):
 
         # ...and the journal is truncated to just a single subtreemap from the
         # newly created segment
-        summary_output = self.fs.journal_tool(["event", "get", "summary"])
+        summary_output = self.fs.journal_tool(["event", "get", "summary"], 0)
         try:
             self.assertEqual(summary_output,
                              dedent(
@@ -72,7 +72,7 @@ class TestFlush(CephFSTestCase):
                              ).strip())
             flush_data = self.fs.mds_asok(["flush", "journal"])
             self.assertEqual(flush_data['return_code'], 0)
-            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]),
+            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0),
                              dedent(
                                  """
                                  Events by type:
index 1f80366af0cf98ccf8b9cec365a9ba182f1df6b8..e165780f31f188641fd527474e7155482bc14c6a 100644 (file)
@@ -242,10 +242,10 @@ class TestForwardScrub(CephFSTestCase):
         # is all that will be in the InoTable in memory)
 
         self.fs.journal_tool(["event", "splice",
-            "--inode={0}".format(inos["./file2_sixmegs"]), "summary"])
+                              "--inode={0}".format(inos["./file2_sixmegs"]), "summary"], 0)
 
         self.fs.journal_tool(["event", "splice",
-            "--inode={0}".format(inos["./file3_sixmegs"]), "summary"])
+                              "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0)
 
         # Revert to old inotable.
         for key, value in inotable_copy.iteritems():
index 64fe939804ee25ca847edd552cbbf7ce3351546b..5f956be93aaab857b6cd29f03ff12972bc1ca569 100644 (file)
@@ -82,13 +82,14 @@ class TestJournalMigration(CephFSTestCase):
             ))
 
         # Verify that cephfs-journal-tool can now read the rewritten journal
-        inspect_out = self.fs.journal_tool(["journal", "inspect"])
+        inspect_out = self.fs.journal_tool(["journal", "inspect"], 0)
         if not inspect_out.endswith(": OK"):
             raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
                 inspect_out
             ))
 
-        self.fs.journal_tool(["event", "get", "json", "--path", "/tmp/journal.json"])
+        self.fs.journal_tool(["event", "get", "json",
+                              "--path", "/tmp/journal.json"], 0)
         p = self.fs.tool_remote.run(
             args=[
                 "python",
index 3a82b39d0c71d4bac9deebe0375d02818e1a073b..2c7e34e99781e83f4f44495e7d1fb8ca421171c4 100644 (file)
@@ -77,7 +77,7 @@ class TestJournalRepair(CephFSTestCase):
         self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
 
         # Execute the dentry recovery, this should populate the backing store
-        self.fs.journal_tool(['event', 'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
 
         # Dentries in ROOT_INO are present
         self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
@@ -87,7 +87,7 @@ class TestJournalRepair(CephFSTestCase):
 
         # Now check the MDS can read what we wrote: truncate the journal
         # and start the mds.
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
@@ -265,10 +265,10 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_stop(active_mds_names[0])
         self.fs.mds_fail(active_mds_names[0])
         # Invoke recover_dentries quietly, because otherwise log spews millions of lines
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
         self.fs.table_tool(["0", "reset", "session"])
-        self.fs.journal_tool(["journal", "reset"], rank=0)
+        self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
                 '--yes-i-really-mean-it')
index 6abd9ee5f330dc7f618bed5c6a424ef8d49ee5d4..97049b9c0a3374af448c708d1d34370380f3a8ab 100644 (file)
@@ -149,7 +149,7 @@ class TestRecoveryPool(CephFSTestCase):
         if False:
             with self.assertRaises(CommandFailedError):
                 # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
 
         self.fs.mds_stop()
         self.fs.data_scan(['scan_extents', '--alternate-pool',
@@ -159,22 +159,18 @@ class TestRecoveryPool(CephFSTestCase):
                            recovery_pool, '--filesystem', self.fs.name,
                            '--force-corrupt', '--force-init',
                            self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list',
-                              '--alternate-pool', recovery_pool])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list',
+                              '--alternate-pool', recovery_pool], 0)
 
         self.fs.data_scan(['init', '--force-init', '--filesystem',
                            self.fs.name])
         self.fs.data_scan(['scan_inodes', '--filesystem', self.fs.name,
                            '--force-corrupt', '--force-init',
                            self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
 
-        self.fs.journal_tool(['--rank=' + recovery_fs + ":0", 'journal',
-                              'reset', '--force'])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'journal',
-                              'reset', '--force'])
+        self.recovery_fs.journal_tool(['journal', 'reset', '--force'], 0)
+        self.fs.journal_tool(['journal', 'reset', '--force'], 0)
         self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired',
                                             recovery_fs + ":0")
 
index 525b6aa463149e58c944c03d0d29120609d3958d..3fe01ed63f19e0bce8550d562c24ca5ffaae37cb 100755 (executable)
@@ -3,7 +3,7 @@
 set -e
 set -x
 
-export BIN="${BIN:-cephfs-journal-tool}"
+export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
 export JOURNAL_FILE=/tmp/journal.bin
 export JSON_OUTPUT=/tmp/json.tmp
 export BINARY_OUTPUT=/tmp/binary.tmp