# Mount the requested number of clients
for i in range(0, self.CLIENTS_REQUIRED):
- self.mounts[i].mount()
- self.mounts[i].wait_until_mounted()
+ self.mounts[i].mount_wait()
if self.REQUIRE_RECOVERY_FILESYSTEM:
if not self.REQUIRE_FILESYSTEM:
def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
raise NotImplementedError()
+ def mount_wait(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
+ self.mount(mount_path=mount_path, mount_fs_name=mount_fs_name, mountpoint=mountpoint,
+ mount_options=mount_options)
+ self.wait_until_mounted()
+
def umount(self):
raise NotImplementedError()
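For context, a minimal sketch of the pattern the new mount_wait() helper bundles, assuming a hypothetical ExampleMount class whose wait_until_mounted() simply polls the mountpoint; the real kernel and FUSE mount classes implement both steps differently, so this is illustrative only and not the teuthology implementation.

    import os
    import time

    class ExampleMount(object):
        """Hypothetical stand-in for a CephFSMount subclass (illustration only)."""

        def __init__(self, mountpoint):
            self.mountpoint = mountpoint
            self.mounted = False

        def mount(self, **kwargs):
            # A real subclass would perform the actual kernel/FUSE mount here,
            # honouring mount_path/mount_fs_name/mountpoint/mount_options.
            self.mounted = True

        def wait_until_mounted(self, timeout=30):
            # Poll until the mountpoint is usable; raise if it never comes up.
            deadline = time.time() + timeout
            while time.time() < deadline:
                if self.mounted and os.path.isdir(self.mountpoint):
                    return
                time.sleep(1)
            raise RuntimeError("timed out waiting for {0} to mount".format(self.mountpoint))

        def mount_wait(self, **kwargs):
            # Same shape as the helper added above: mount, then block until ready.
            self.mount(**kwargs)
            self.wait_until_mounted()

With a helper of that shape, every call site that previously paired mount() with wait_until_mounted() collapses to a single mount_wait() call, which is the pattern repeated throughout the hunks below.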
self.fs.rados(["rmxattr", dir_objname, "parent"])
# readdir (fetch dirfrag) should fix testdir1's backtrace
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["ls", "testdir1"])
# flush journal entries to dirfrag objects
self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
self.mount_a.teardown()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
mount_a_client_id = self.mount_a.get_global_id()
# Client A creates a file. He will hold the write caps on the file, and later (simulated bug) fail
self.set_conf('client', 'client inject fixed oldest tid', 'true')
self.mount_a.teardown()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.fs.mds_asok(['config', 'set', 'mds_max_completed_requests', '{0}'.format(max_requests)])
self.mount_a.run_shell(["mkdir", "subdir"])
self.mount_a.umount_wait()
self.set_conf('client', 'client mountpoint', '/subdir')
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
root_ino = self.mount_a.path_to_ino(".")
self.assertEqual(root_ino, 1)
self.mount_b.check_files()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# Check that the admin socket interface is correctly reporting
# two sessions
# Check that the client that timed out during reconnect can
# mount again and do I/O
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.create_destroy()
self.assert_session_count(2)
self.mount_a.kill_cleanup()
# Bring the client back
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.create_destroy()
def _test_stale_caps(self, write):
else:
self.mount_a.run_shell(["touch", "background_file"])
self.mount_a.umount_wait()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
cap_holder = self.mount_a.open_background(write=False)
self.assert_session_count(2)
self.mount_a.kill_cleanup()
# Bring the client back
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
def test_dir_fsync(self):
self._test_fsync(True)
log.info("Reached active...")
# Is the child dentry visible from mount B?
- self.mount_b.mount()
- self.mount_b.wait_until_mounted()
+ self.mount_b.mount_wait()
self.mount_b.run_shell(["ls", "subdir/childfile"])
def test_unmount_for_evicted_client(self):
log.info("Daemons came up after mutation '{0}', proceeding to ls".format(mutation.desc))
# MDS is up, should go damaged on ls or client mount
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
if mutation.ls_path == ".":
proc = self.mount_a.run_shell(["ls", "-R", mutation.ls_path], wait=False)
else:
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
dentries = self.mount_a.ls("subdir/")
# The damaged guy should have disappeared
self.assertEqual(scrub_json["raw_stats"]["passed"], False)
# Check that the file count is now correct
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files")
self.assertEqual(nfiles, "1")
self.mds_cluster.mds_fail_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# Case 1: un-decodeable backtrace
log.info(str(self.mds_cluster.status()))
# Mount a client
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# See that the files are present and correct
errors = workload.validate()
# Start filesystem back up, observe that the file appears to be gone in an `ls`
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
files = self.mount_a.run_shell(["ls", "subdir/"]).stdout.getvalue().strip().split("\n")
self.assertListEqual(sorted(files), sorted(list(set(file_names) - set([victim_dentry]))))
# and points to the correct file data.
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip()
self.assertEqual(out, victim_dentry)
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# link count was adjusted?
file1_nlink = self.mount_a.path_to_nlink("testdir1/file1")
self.mounts[0].umount_wait()
# Control: that we can mount and unmount usually, while the cluster is healthy
- self.mounts[0].mount()
- self.mounts[0].wait_until_mounted()
+ self.mounts[0].mount_wait()
self.mounts[0].umount_wait()
# Stop the daemon processes
self.wait_until_true(laggy, grace * 2)
with self.assertRaises(CommandFailedError):
- self.mounts[0].mount()
+ self.mounts[0].mount_wait()
def test_standby_count_wanted(self):
"""
initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']
# Use a client to delete a file
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["rm", "-rf", "mydir"])
# Flush the journal so that the directory inode can be purged
# Create a new inode that's just in the log, i.e. would
# look orphaned to backward scan if backward scan weren't
# respecting the scrub_tag xattr.
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["mkdir", "parent/unflushed"])
self.mount_a.run_shell(["dd", "if=/dev/urandom",
"of=./parent/unflushed/jfile",
self.fs.wait_for_daemons()
# See that the orphaned file is indeed missing from a client's POV
- self.mount_a.mount()
+ self.mount_a.mount_wait()
damaged_state = self._get_paths_to_ino()
self.assertNotIn("./parent/flushed/bravo", damaged_state)
self.mount_a.umount_wait()
# and no lost+found, and no extra inodes!
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self._validate_linkage(inos)
def _stash_inotable(self):
inotable_copy = self._stash_inotable()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.write_n_mb("file2_sixmegs", 6)
self.mount_a.write_n_mb("file3_sixmegs", 6)
self.fs.wait_for_daemons()
# List files
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# First ls -R to populate MDCache, such that hardlinks will
# resolve properly (recover_dentries does not create backtraces,
# FIXME: hook in forward scrub here to regenerate backtraces
proc = self.mount_a.run_shell(['ls', '-R'])
self.mount_a.umount_wait() # remount to clear client cache before our second ls
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
proc = self.mount_a.run_shell(['ls', '-R'])
self.assertEqual(proc.stdout.getvalue().strip(),
self.fs.mds_fail_restart(active_mds_names[0])
self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
reject_fn=lambda v: len(v) > 1)
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["ls", "-R"], wait=True)
def test_table_tool(self):
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# trivial sync mount a
workunit(self.ctx, {
# on lookup/open
self.mount_b.umount_wait()
self.set_conf('client', 'client debug getattr caps', 'true')
- self.mount_b.mount()
- self.mount_b.wait_until_mounted()
+ self.mount_b.mount_wait()
# create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
# to mount_a
'allow r pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# write should fail
self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
'allow w pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# read should fail
self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
))
self.mount_a.umount_wait()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
with self.assertRaises(CommandFailedError):
self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
self.fs.get_data_pool_names()[0],
self.fs.get_data_pool_names()[1],
))
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
new_pool_name)
self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool",
# Unmount and remount the client to flush cache
self.mount_a.umount_wait()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
initial_op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r']
self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"])
log.info(str(self.mds_cluster.status()))
# Mount a client
- self.mount_a.mount()
- self.mount_b.mount(mount_fs_name=recovery_fs)
- self.mount_a.wait_until_mounted()
- self.mount_b.wait_until_mounted()
+ self.mount_a.mount_wait()
+ self.mount_b.mount_wait(mount_fs_name=recovery_fs)
# See that the files are present and correct
errors = workload.validate()
self.fs.mds_fail_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# fragstat indicates the directory is not empty, rmdir should fail
with self.assertRaises(CommandFailedError) as ar:
status = self.fs.status()
s = self._get_connection_count(status=status)
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.assertGreater(self._get_connection_count(status=status), s)
self.mount_a.umount_wait()
e = self._get_connection_count(status=status)
status = self.fs.wait_for_daemons()
# Bring the clients back
- self.mount_a.mount()
- self.mount_b.mount()
+ self.mount_a.mount_wait()
+ self.mount_b.mount_wait()
# See that they've got sessions
self.assert_session_count(2, mds_id=self.fs.get_rank(status=status)['name'])
# Configure a client that is limited to /foo/bar
self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
# Check he can mount that dir and do IO
- self.mount_b.mount(mount_path="/foo/bar")
- self.mount_b.wait_until_mounted()
+ self.mount_b.mount_wait(mount_path="/foo/bar")
self.mount_b.create_destroy()
self.mount_b.umount_wait()
self.assert_session_count(1, mds_id=self.fs.get_rank(rank=1, status=status)['name'])
self.mount_a.kill_cleanup()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
else:
self.assertGreater(self._get_last_created_snap(rank=0), last_created)
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
else:
self.assertGreater(self._get_last_created_snap(rank=0), last_created)
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
self.assertEqual(self._get_last_created_snap(rank=0), last_created)
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
def test_snapclient_cache(self):
"""
self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache))
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
self.fs.mds_asok(['flush', 'journal'])
self.fs.mds_fail_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# Unlink file_a
self.mount_a.run_shell(["rm", "-f", "dir_1/file_a"])
rank_0_id = active_mds_names[0]
rank_1_id = active_mds_names[1]
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
self.mount_a.umount_wait()
# zero, but there's actually still a stray, so at the very
# least the StrayManager stats code is slightly off
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# See that the data from the snapshotted revision of the file is still present
# and correct
# remount+flush (release client caps)
self.mount_a.umount_wait()
self.fs.mds_asok(["flush", "journal"], mds_id)
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# Create 50% more files than the current fragment limit
self.mount_a.run_python(dedent("""
m.umount_wait()
# Create a dir on mount A
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["mkdir", "parent1"])
self.mount_a.run_shell(["mkdir", "parent2"])
self.mount_a.run_shell(["mkdir", "parent1/mydir"])
# Put some files in it from mount B
- self.mount_b.mount()
+ self.mount_b.mount_wait()
self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
self.mount_b.umount_wait()
for subvolume in subvolumes:
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# verify trash dir is clean
self._wait_for_trash_empty(timeout=300)