# Load any config settings of interest
for setting in self.LOAD_SETTINGS:
setattr(self, setting, float(self.fs.mds_asok(
- ['config', 'get', setting], list(self.mds_cluster.mds_ids)[0]
+ ['config', 'get', setting], mds_id=list(self.mds_cluster.mds_ids)[0]
)[setting]))
self.configs_set = set()
def set_config(self, opt, val, rank=0, status=None):
command = ["config", "set", opt, val]
- self.rank_asok(command, rank, status=status)
+ self.rank_asok(command, rank=rank, status=status)
def set_allow_multifs(self, yes=True):
self.set_flag("enable_multiple", yes)
# to avoid waiting through reconnect on every MDS start.
self.mount_a.umount_wait()
for mds_name in self.fs.get_active_names():
- self.fs.mds_asok(["flush", "journal"], mds_name)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
self.fs.fail()
self.mount_a.umount_wait()
for mds_name in self.fs.get_active_names():
- self.fs.mds_asok(["flush", "journal"], mds_name)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
self.fs.fail()
# Ensure that one directory is fragmented
mds_id = self.fs.get_active_names()[0]
- self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id)
+ self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id=mds_id)
# Flush journal and stop MDS
self.mount_a.umount_wait()
- self.fs.mds_asok(["flush", "journal"], mds_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
self.fs.fail()
# Pick a dentry and wipe out its key
# Finally, close the loop by checking our injected dentry survives a merge
mds_id = self.fs.get_active_names()[0]
self.mount_a.ls("subdir") # Do an ls to ensure both frags are in cache so the merge will work
- self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id)
- self.fs.mds_asok(["flush", "journal"], mds_id)
+ self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id=mds_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
frag_obj_id = "{0:x}.00000000".format(dir_ino)
keys = self._dirfrag_keys(frag_obj_id)
self.assertListEqual(sorted(keys), sorted(["%s_head" % f for f in file_names]))
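# For reference, CephFS stores each directory fragment as a RADOS object
# named "<directory inode in hex>.<fragment id>", which is what the format
# string above builds. Illustration only, with a hypothetical inode value:
#
#   example_ino = 0x10000000000
#   "{0:x}.00000000".format(example_ino)   # -> "10000000000.00000000"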
self.mount_a.run_shell(["ln", "testdir1/file1", "testdir2/link2"])
mds_id = self.fs.get_active_names()[0]
- self.fs.mds_asok(["flush", "journal"], mds_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
dirfrag1_keys = self._dirfrag_keys(dirfrag1_oid)
self.mount_a.run_shell(["touch", "testdir1/file1"])
self.mount_a.umount_wait()
- self.fs.mds_asok(["flush", "journal"], mds_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
self.fs.fail()
# repair linkage errors
self.mount_a.umount_wait()
- self.fs.mds_asok(["flush", "journal"], mds0_id)
- self.fs.mds_asok(["flush", "journal"], mds1_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds1_id)
self.fs.fail()
self.fs.radosm(["rm", "mds0_inotable"])
self.mount_a.umount_wait()
mds0_id = self.fs.get_active_names()[0]
- self.fs.mds_asok(["flush", "journal"], mds0_id)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
# wait for mds to update removed snaps
time.sleep(10)
self.mount_a.umount_wait()
for mds_name in self.fs.get_active_names():
- self.fs.mds_asok(["flush", "journal"], mds_name)
+ self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
self.fs.fail()
self._wait_subtrees([('/d1/d2/d3/d4', 1), ('/d1/d2/d3/d4/d5/d6', 2)], status, 2)
for rank in range(3):
- self.fs.rank_tell(["flush", "journal"], rank)
+ self.fs.rank_tell(["flush", "journal"], rank=rank)
def test_apply_tag(self):
self._setup_subtrees()
inos = self._find_path_inos('d1/d2/d3/')
tag = "tag123"
- out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], 0)
+ out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], rank=0)
self.assertNotEqual(out_json, None)
self.assertEqual(out_json["return_code"], 0)
self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
def _check_damage(mds_rank, inos):
- all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+ all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
damage = [d for d in all_damage if d['ino'] in inos and d['damage_type'] == "backtrace"]
return len(damage) >= len(inos)
command = "scrub start {file}".format(file=test_new_file)
def _check_and_clear_damage(ino, dtype):
- all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+ all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
for d in damage:
self.run_ceph_cmd(
mnt.run_shell(["mkdir", f"{client_path}/.snap/snap1-{test_dir}"])
mnt.run_shell(f"find {client_path}/ -type f -delete")
mnt.run_shell(["rmdir", f"{client_path}/.snap/snap1-{test_dir}"])
- perf_dump = fs.rank_tell(["perf", "dump"], 0)
+ perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
self.assertNotEqual(perf_dump.get('mds_cache').get('num_strays'),
0, "mdcache.num_strays is zero")
self.assertEqual(
fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
- perf_dump = fs.rank_tell(["perf", "dump"], 0)
+ perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
self.assertEqual(int(perf_dump.get('mds_cache').get('num_strays')),
0, "mdcache.num_strays is non-zero")
log.info("Running command '{command}'".format(command=command))
command_list = command.split()
- jout = self.fs.rank_tell(command_list, mds_rank)
+ jout = self.fs.rank_tell(command_list, rank=mds_rank)
log.info("command '{command}' returned '{jout}'".format(
command=command, jout=jout))
# empty mds cache. otherwise mds reintegrates stray when unlink finishes
self.mount_a.umount_wait()
- self.fs.mds_asok(['flush', 'journal'], rank_1_id)
- self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+ self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+ self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
self.mount_a.mount_wait()
self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
self.assertEqual(self.get_mdc_stat("strays_enqueued", mds_id=rank_1_id), 0)
# Test loading unlinked dir into cache
- self.fs.mds_asok(['flush', 'journal'], rank_1_id)
- self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+ self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+ self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
# Shut down rank 1
self.fs.set_max_mds(1)
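# ---------------------------------------------------------------------------
# Note on the pattern above: every hunk makes the same change, passing the
# target MDS as a keyword argument (mds_id=..., rank=...) instead of
# positionally. The sketch below shows the kind of helper signatures these
# call sites assume; the signatures are approximations for illustration
# only, not the actual qa/tasks/cephfs/filesystem.py definitions.

class FilesystemSketch:
    def mds_asok(self, args, mds_id=None, timeout=None):
        """Run an admin-socket command against the MDS daemon named mds_id."""
        raise NotImplementedError

    def rank_asok(self, command, rank=0, status=None, timeout=None):
        """Run an admin-socket command against the MDS holding the given rank."""
        raise NotImplementedError

    def rank_tell(self, command, rank=0, status=None):
        """Send a 'tell' command to the MDS holding the given rank."""
        raise NotImplementedError

# With keyword arguments, call sites such as
#   fs.mds_asok(["flush", "journal"], mds_id=mds_id)
#   fs.rank_tell(["perf", "dump"], rank=0)
# stay unambiguous if optional parameters (status, timeout) are added later.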