From f65193d95534dd34b593c92e1848b181737b0561 Mon Sep 17 00:00:00 2001
From: Venky Shankar
Date: Tue, 4 Sep 2018 02:20:54 -0400
Subject: [PATCH] test: make rank argument mandatory when running journal_tool

Also, fix a bunch of quirky journal_tool invocations that pass the
"--rank" argument as a command argument rather than as a function
argument.

Fixes: https://tracker.ceph.com/issues/24780
Signed-off-by: Venky Shankar
---
(A short usage sketch of the new calling convention follows the patch
trailer.)

 qa/tasks/cephfs/filesystem.py                    | 12 ++++++++----
 qa/tasks/cephfs/test_damage.py                   |  2 +-
 qa/tasks/cephfs/test_data_scan.py                |  4 ++--
 qa/tasks/cephfs/test_flush.py                    |  4 ++--
 qa/tasks/cephfs/test_forward_scrub.py            |  4 ++--
 qa/tasks/cephfs/test_journal_migration.py        |  5 +++--
 qa/tasks/cephfs/test_journal_repair.py           | 10 +++++-----
 qa/tasks/cephfs/test_recovery_pool.py            | 16 ++++++----------
 qa/workunits/suites/cephfs_journal_tool_smoke.sh |  2 +-
 9 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index b2837c2eee82d..8256e914cf03e 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -1220,6 +1220,9 @@ class Filesystem(MDSCluster):
         """
         return ""
 
+    def _make_rank(self, rank):
+        return "{}:{}".format(self.name, rank)
+
     def _run_tool(self, tool, args, rank=None, quiet=False):
         # Tests frequently have [client] configuration that jacks up
         # the objecter log level (unlikely to be interesting here)
@@ -1230,7 +1233,7 @@ class Filesystem(MDSCluster):
         base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']
 
         if rank is not None:
-            base_args.extend(["--rank", "%d" % rank])
+            base_args.extend(["--rank", "%s" % str(rank)])
 
         t1 = datetime.datetime.now()
         r = self.tool_remote.run(
@@ -1252,11 +1255,12 @@ class Filesystem(MDSCluster):
             mds_id = self.mds_ids[0]
         return self.mds_daemons[mds_id].remote
 
-    def journal_tool(self, args, rank=None, quiet=False):
+    def journal_tool(self, args, rank, quiet=False):
         """
-        Invoke cephfs-journal-tool with the passed arguments, and return its stdout
+        Invoke cephfs-journal-tool with the passed arguments for a rank, and return its stdout
         """
-        return self._run_tool("cephfs-journal-tool", args, rank, quiet)
+        fs_rank = self._make_rank(rank)
+        return self._run_tool("cephfs-journal-tool", args, fs_rank, quiet)
 
     def table_tool(self, args, quiet=False):
         """
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index d56f39eed011f..459077b042823 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -482,7 +482,7 @@ class TestDamage(CephFSTestCase):
 
         # Drop everything from the MDS cache
         self.mds_cluster.mds_stop()
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 252354694c70c..0faeb43fc03f3 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -362,9 +362,9 @@ class TestDataScan(CephFSTestCase):
         if False:
             with self.assertRaises(CommandFailedError):
                 # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
 
-        self.fs.journal_tool(["journal", "reset", "--force"])
+        self.fs.journal_tool(["journal", "reset", "--force"], 0)
         self.fs.data_scan(["init"])
         self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()], worker_count=workers)
self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()], worker_count=workers) diff --git a/qa/tasks/cephfs/test_flush.py b/qa/tasks/cephfs/test_flush.py index 1f84e4200a399..ee0b1c92b1992 100644 --- a/qa/tasks/cephfs/test_flush.py +++ b/qa/tasks/cephfs/test_flush.py @@ -44,7 +44,7 @@ class TestFlush(CephFSTestCase): # ...and the journal is truncated to just a single subtreemap from the # newly created segment - summary_output = self.fs.journal_tool(["event", "get", "summary"]) + summary_output = self.fs.journal_tool(["event", "get", "summary"], 0) try: self.assertEqual(summary_output, dedent( @@ -72,7 +72,7 @@ class TestFlush(CephFSTestCase): ).strip()) flush_data = self.fs.mds_asok(["flush", "journal"]) self.assertEqual(flush_data['return_code'], 0) - self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]), + self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0), dedent( """ Events by type: diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py index 1f80366af0cf9..e165780f31f18 100644 --- a/qa/tasks/cephfs/test_forward_scrub.py +++ b/qa/tasks/cephfs/test_forward_scrub.py @@ -242,10 +242,10 @@ class TestForwardScrub(CephFSTestCase): # is all that will be in the InoTable in memory) self.fs.journal_tool(["event", "splice", - "--inode={0}".format(inos["./file2_sixmegs"]), "summary"]) + "--inode={0}".format(inos["./file2_sixmegs"]), "summary"], 0) self.fs.journal_tool(["event", "splice", - "--inode={0}".format(inos["./file3_sixmegs"]), "summary"]) + "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0) # Revert to old inotable. for key, value in inotable_copy.iteritems(): diff --git a/qa/tasks/cephfs/test_journal_migration.py b/qa/tasks/cephfs/test_journal_migration.py index 64fe939804ee2..5f956be93aaab 100644 --- a/qa/tasks/cephfs/test_journal_migration.py +++ b/qa/tasks/cephfs/test_journal_migration.py @@ -82,13 +82,14 @@ class TestJournalMigration(CephFSTestCase): )) # Verify that cephfs-journal-tool can now read the rewritten journal - inspect_out = self.fs.journal_tool(["journal", "inspect"]) + inspect_out = self.fs.journal_tool(["journal", "inspect"], 0) if not inspect_out.endswith(": OK"): raise RuntimeError("Unexpected journal-tool result: '{0}'".format( inspect_out )) - self.fs.journal_tool(["event", "get", "json", "--path", "/tmp/journal.json"]) + self.fs.journal_tool(["event", "get", "json", + "--path", "/tmp/journal.json"], 0) p = self.fs.tool_remote.run( args=[ "python", diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py index 3a82b39d0c71d..2c7e34e99781e 100644 --- a/qa/tasks/cephfs/test_journal_repair.py +++ b/qa/tasks/cephfs/test_journal_repair.py @@ -77,7 +77,7 @@ class TestJournalRepair(CephFSTestCase): self.assertEqual(self.fs.list_dirfrag(ROOT_INO), []) # Execute the dentry recovery, this should populate the backing store - self.fs.journal_tool(['event', 'recover_dentries', 'list']) + self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0) # Dentries in ROOT_INO are present self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head'])) @@ -87,7 +87,7 @@ class TestJournalRepair(CephFSTestCase): # Now check the MDS can read what we wrote: truncate the journal # and start the mds. 
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
@@ -265,10 +265,10 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_stop(active_mds_names[0])
         self.fs.mds_fail(active_mds_names[0])
         # Invoke recover_dentries quietly, because otherwise log spews millions of lines
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
         self.fs.table_tool(["0", "reset", "session"])
-        self.fs.journal_tool(["journal", "reset"], rank=0)
+        self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
                                             '--yes-i-really-mean-it')
diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py
index 6abd9ee5f330d..97049b9c0a337 100644
--- a/qa/tasks/cephfs/test_recovery_pool.py
+++ b/qa/tasks/cephfs/test_recovery_pool.py
@@ -149,7 +149,7 @@ class TestRecoveryPool(CephFSTestCase):
         if False:
             with self.assertRaises(CommandFailedError):
                 # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
 
         self.fs.mds_stop()
         self.fs.data_scan(['scan_extents', '--alternate-pool',
@@ -159,22 +159,18 @@ class TestRecoveryPool(CephFSTestCase):
                            recovery_pool, '--filesystem', self.fs.name,
                            '--force-corrupt', '--force-init',
                            self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list',
-                              '--alternate-pool', recovery_pool])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list',
+                              '--alternate-pool', recovery_pool], 0)
 
         self.fs.data_scan(['init', '--force-init', '--filesystem',
                            self.fs.name])
         self.fs.data_scan(['scan_inodes', '--filesystem', self.fs.name,
                            '--force-corrupt', '--force-init',
                            self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
 
-        self.fs.journal_tool(['--rank=' + recovery_fs + ":0", 'journal',
-                              'reset', '--force'])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'journal',
-                              'reset', '--force'])
+        self.recovery_fs.journal_tool(['journal', 'reset', '--force'], 0)
+        self.fs.journal_tool(['journal', 'reset', '--force'], 0)
 
         self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired',
                                             recovery_fs + ":0")
diff --git a/qa/workunits/suites/cephfs_journal_tool_smoke.sh b/qa/workunits/suites/cephfs_journal_tool_smoke.sh
index 525b6aa463149..3fe01ed63f19e 100755
--- a/qa/workunits/suites/cephfs_journal_tool_smoke.sh
+++ b/qa/workunits/suites/cephfs_journal_tool_smoke.sh
@@ -3,7 +3,7 @@
 set -e
 set -x
 
-export BIN="${BIN:-cephfs-journal-tool}"
+export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
 export JOURNAL_FILE=/tmp/journal.bin
 export JSON_OUTPUT=/tmp/json.tmp
 export BINARY_OUTPUT=/tmp/binary.tmp
-- 
2.39.5
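
Usage sketch (not part of the patch itself): a minimal illustration of
the calling convention this change enforces. make_rank() and
journal_tool_cmd() below are hypothetical stand-ins for
Filesystem._make_rank() and the "--rank" handling in
Filesystem._run_tool(); the filesystem name "cephfs" and rank 0 are
example values.

    # Hypothetical sketch, assuming a filesystem named "cephfs" with a
    # single active MDS rank 0.
    def make_rank(fs_name, rank):
        # Ranks are qualified with the filesystem name, matching
        # cephfs-journal-tool's "--rank=<fs>:<rank>" syntax.
        return "{}:{}".format(fs_name, rank)

    def journal_tool_cmd(args, fs_name, rank):
        # The rank is now a mandatory positional parameter; callers can
        # no longer omit it and fall back to a default.
        return ["cephfs-journal-tool",
                "--rank", make_rank(fs_name, rank)] + list(args)

    print(journal_tool_cmd(["journal", "inspect"], "cephfs", 0))
    # ['cephfs-journal-tool', '--rank', 'cephfs:0', 'journal', 'inspect']

Qualifying the rank with the filesystem name lets the tool resolve the
correct journal when multiple filesystems exist, which is also why the
smoke test now defaults BIN to "cephfs-journal-tool --rank=cephfs:0".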