"""
return ""
+ def _make_rank(self, rank):
+ return "{}:{}".format(self.name, rank)
+
def _run_tool(self, tool, args, rank=None, quiet=False):
# Tests frequently have [client] configuration that jacks up
# the objecter log level (unlikely to be interesting here)
base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']
if rank is not None:
- base_args.extend(["--rank", "%d" % rank])
+ base_args.extend(["--rank", "%s" % str(rank)])
t1 = datetime.datetime.now()
r = self.tool_remote.run(
mds_id = self.mds_ids[0]
return self.mds_daemons[mds_id].remote
-    def journal_tool(self, args, rank=None, quiet=False):
+    def journal_tool(self, args, rank, quiet=False):
        """
-        Invoke cephfs-journal-tool with the passed arguments, and return its stdout
+        Invoke cephfs-journal-tool with the passed arguments against the given MDS rank, and return its stdout
        """
-        return self._run_tool("cephfs-journal-tool", args, rank, quiet)
+        fs_rank = self._make_rank(rank)
+        return self._run_tool("cephfs-journal-tool", args, fs_rank, quiet)
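Since rank is now mandatory, every journal_tool() call expands its integer rank into the "<fs_name>:<rank>" spec via _make_rank() before _run_tool() appends it. A sketch of the resulting command line (assuming a filesystem named "cephfs"; the real tool path comes from self._prefix):

    # journal_tool(["journal", "inspect"], 0) ends up running roughly:
    cephfs-journal-tool --debug-mds=4 --debug-objecter=1 --rank cephfs:0 journal inspect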
    def table_tool(self, args, quiet=False):
        """
        # Drop everything from the MDS cache
        self.mds_cluster.mds_stop()
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
        self.mds_cluster.mds_fail_restart()
        self.fs.wait_for_daemons()
        if False:
            with self.assertRaises(CommandFailedError):
                # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
-        self.fs.journal_tool(["journal", "reset", "--force"])
+        self.fs.journal_tool(["journal", "reset", "--force"], 0)
        self.fs.data_scan(["init"])
        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()], worker_count=workers)
        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()], worker_count=workers)
        # ...and the journal is truncated to just a single subtreemap from the
        # newly created segment
-        summary_output = self.fs.journal_tool(["event", "get", "summary"])
+        summary_output = self.fs.journal_tool(["event", "get", "summary"], 0)
        try:
            self.assertEqual(summary_output,
                             dedent(
                             ).strip())
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)
-        self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]),
+        self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0),
                         dedent(
                             """
                             Events by type:
        # is all that will be in the InoTable in memory)
        self.fs.journal_tool(["event", "splice",
-                             "--inode={0}".format(inos["./file2_sixmegs"]), "summary"])
+                             "--inode={0}".format(inos["./file2_sixmegs"]), "summary"], 0)
        self.fs.journal_tool(["event", "splice",
-                             "--inode={0}".format(inos["./file3_sixmegs"]), "summary"])
+                             "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0)
        # Revert to old inotable.
        for key, value in inotable_copy.iteritems():
        ))
        # Verify that cephfs-journal-tool can now read the rewritten journal
-        inspect_out = self.fs.journal_tool(["journal", "inspect"])
+        inspect_out = self.fs.journal_tool(["journal", "inspect"], 0)
        if not inspect_out.endswith(": OK"):
            raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
                inspect_out
            ))
-        self.fs.journal_tool(["event", "get", "json", "--path", "/tmp/journal.json"])
+        self.fs.journal_tool(["event", "get", "json",
+                              "--path", "/tmp/journal.json"], 0)
        p = self.fs.tool_remote.run(
            args=[
                "python",
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
        # Execute the dentry recovery, this should populate the backing store
-        self.fs.journal_tool(['event', 'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
        # Dentries in ROOT_INO are present
        self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
        # Now check the MDS can read what we wrote: truncate the journal
        # and start the mds.
-        self.fs.journal_tool(['journal', 'reset'])
+        self.fs.journal_tool(['journal', 'reset'], 0)
        self.fs.mds_fail_restart()
        self.fs.wait_for_daemons()
        self.fs.mds_stop(active_mds_names[0])
        self.fs.mds_fail(active_mds_names[0])
        # Invoke recover_dentries quietly, because otherwise log spews millions of lines
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
-        self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
+        self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
        self.fs.table_tool(["0", "reset", "session"])
-        self.fs.journal_tool(["journal", "reset"], rank=0)
+        self.fs.journal_tool(["journal", "reset"], 0)
        self.fs.erase_mds_objects(1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
                                            '--yes-i-really-mean-it')
        if False:
            with self.assertRaises(CommandFailedError):
                # Normal reset should fail when no objects are present, we'll use --force instead
-                self.fs.journal_tool(["journal", "reset"])
+                self.fs.journal_tool(["journal", "reset"], 0)
        self.fs.mds_stop()
        self.fs.data_scan(['scan_extents', '--alternate-pool',
                           recovery_pool, '--filesystem', self.fs.name,
                           '--force-corrupt', '--force-init',
                           self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list',
-                              '--alternate-pool', recovery_pool])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list',
+                              '--alternate-pool', recovery_pool], 0)
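The --alternate-pool form writes the recovered metadata into a separate pool instead of the filesystem's own metadata pool; as a direct invocation, roughly (the pool name is a placeholder):

    cephfs-journal-tool --rank cephfs:0 event recover_dentries list --alternate-pool cephfs_recovery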
        self.fs.data_scan(['init', '--force-init', '--filesystem',
                           self.fs.name])
        self.fs.data_scan(['scan_inodes', '--filesystem', self.fs.name,
                           '--force-corrupt', '--force-init',
                           self.fs.get_data_pool_name()])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'event',
-                              'recover_dentries', 'list'])
+        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
-        self.fs.journal_tool(['--rank=' + recovery_fs + ":0", 'journal',
-                              'reset', '--force'])
-        self.fs.journal_tool(['--rank=' + self.fs.name + ":0", 'journal',
-                              'reset', '--force'])
+        self.recovery_fs.journal_tool(['journal', 'reset', '--force'], 0)
+        self.fs.journal_tool(['journal', 'reset', '--force'], 0)
        self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired',
                                            recovery_fs + ":0")
set -e
set -x
-export BIN="${BIN:-cephfs-journal-tool}"
+export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
export JOURNAL_FILE=/tmp/journal.bin
export JSON_OUTPUT=/tmp/json.tmp
export BINARY_OUTPUT=/tmp/binary.tmp
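With the --rank spec folded into BIN, every later invocation in the smoke test carries it implicitly, e.g. (a sketch; $BIN is deliberately left unquoted so it word-splits into the command plus its flag):

    $BIN journal inspect    # i.e. cephfs-journal-tool --rank=cephfs:0 journal inspect
    $BIN event get json --path $JSON_OUTPUT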