# Drop everything from the MDS cache
self.fs.fail()
- self.fs.journal_tool(['journal', 'reset'], 0)
+ self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
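+ # the extra flag explicitly acknowledges that the reset discards any remaining journal contents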
self.fs.set_joinable()
self.fs.wait_for_daemons()
if False:
with self.assertRaises(CommandFailedError):
# Normal reset should fail when no objects are present, we'll use --force instead
- self.fs.journal_tool(["journal", "reset"], 0)
+ self.fs.journal_tool(["journal", "reset", "--yes-i-really-really-mean-it"], 0)
- self.fs.journal_tool(["journal", "reset", "--force"], 0)
+ self.fs.journal_tool(["journal", "reset", "--force", "--yes-i-really-really-mean-it"], 0)
self.fs.data_scan(["init"])
self.fs.data_scan(["scan_extents"], worker_count=workers)
self.fs.data_scan(["scan_inodes"], worker_count=workers)
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO
-
class TestFlush(CephFSTestCase):
def test_flush(self):
self.mount_a.run_shell(["mkdir", "mydir"])
# ...and the journal is truncated to just a single subtreemap from the
# newly created segment
+ self.fs.fail()
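+ # cephfs-journal-tool expects the rank to be offline, so fail the fs around the read and rejoin afterwards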
summary_output = self.fs.journal_tool(["event", "get", "summary"], 0)
+ self.fs.set_joinable()
+ self.fs.wait_for_daemons()
try:
self.assertEqual(summary_output,
dedent(
).strip())
flush_data = self.fs.mds_asok(["flush", "journal"])
self.assertEqual(flush_data['return_code'], 0)
+
+ self.fs.fail()
self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0),
dedent(
"""
Errors: 0
"""
).strip())
+ self.fs.set_joinable()
+ self.fs.wait_for_daemons()
# Now for deletion!
# We will count the RADOS deletions and MDS file purges, to verify that
# inotable versions (due to scan_links)
self.fs.flush()
self.fs.fail()
- self.fs.journal_tool(["journal", "reset", "--force"], 0)
+ self.fs.journal_tool(["journal", "reset", "--force", "--yes-i-really-really-mean-it"], 0)
# Run cephfs-data-scan targeting only orphans
self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
self.fs.radosm(["rm", "{0:x}.00000000".format(dir_ino)])
- self.fs.journal_tool(['journal', 'reset'], 0)
+ self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
self.fs.set_joinable()
self.fs.wait_for_daemons()
self.mount_a.mount_wait()
))
# Verify that cephfs-journal-tool can now read the rewritten journal
+ self.fs.fail()
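+ # take the rank down before inspecting the journal offline; the MDS is restarted once the checks pass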
inspect_out = self.fs.journal_tool(["journal", "inspect"], 0)
if not inspect_out.endswith(": OK"):
raise RuntimeError("Unexpected journal-tool result: '{0}'".format(
if event_count < 1000:
# Approximate value of "lots", expected from having run fsstress
raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))
+ self.fs.set_joinable()
+ self.fs.wait_for_daemons()
# Do some client work to check that writing the log is still working
with self.mount_a.mounted_wait():
# Now check the MDS can read what we wrote: truncate the journal
# and start the mds.
- self.fs.journal_tool(['journal', 'reset'], 0)
+ self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
self.fs.set_joinable()
self.fs.wait_for_daemons()
self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
self.fs.table_tool(["0", "reset", "session"])
- self.fs.journal_tool(["journal", "reset"], 0)
+ self.fs.journal_tool(["journal", "reset", "--yes-i-really-really-mean-it"], 0)
self.fs.erase_mds_objects(1)
self.run_ceph_cmd('fs', 'reset', self.fs.name,
'--yes-i-really-mean-it')
if False:
with self.assertRaises(CommandFailedError):
# Normal reset should fail when no objects are present, we'll use --force instead
- self.fs.journal_tool(["journal", "reset"], 0)
+ self.fs.journal_tool(["journal", "reset", "--yes-i-really-really-mean-it"], 0)
recovery_fs.data_scan(['scan_extents', '--alternate-pool',
recovery_pool, '--filesystem', self.fs.name,
recovery_fs.data_scan(['scan_links', '--filesystem', recovery_fs.name])
recovery_fs.journal_tool(['event', 'recover_dentries', 'list',
'--alternate-pool', recovery_pool], 0)
- recovery_fs.journal_tool(["journal", "reset", "--force"], 0)
+ recovery_fs.journal_tool(["journal", "reset", "--force", "--yes-i-really-really-mean-it"], 0)
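+ # --force allows the reset even though no journal has been written to the recovery pool yet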
# Start the MDS
recovery_fs.set_joinable()
ceph fs fail "$FS"
sleep 5
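+ # give the failed MDS a moment to stop before running the offline journal tools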
cephfs-journal-tool --rank="$FS":0 event recover_dentries summary
- cephfs-journal-tool --rank="$FS":0 journal reset
+ cephfs-journal-tool --rank="$FS":0 journal reset --yes-i-really-really-mean-it
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug1 --memo /tmp/memo1 "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug2 --memo /tmp/memo2 --repair-nosnap "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug3 --memo /tmp/memo3 --remove "$METADATA_POOL"
fi
# Can we execute a journal reset?
-$BIN journal reset
+$BIN journal reset --yes-i-really-really-mean-it
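+# a freshly reset journal should pass inspection and still report a valid header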
$BIN journal inspect
$BIN header get
# Metadata objects have been modified by the 'event recover_dentries' command.
# Journal is no longer consistent with respect to metadata objects (especially inotable).
# To ensure mds successfully replays its journal, we need to do journal reset.
-$BIN journal reset
+$BIN journal reset --yes-i-really-really-mean-it
cephfs-table-tool all reset session