log = logging.getLogger(__name__)
-class TestVolumes(CephFSTestCase):
+class TestVolumesHelper(CephFSTestCase):
+ """Helper class for testing FS volume, subvolume group and subvolume operations."""
TEST_VOLUME_PREFIX = "volume"
TEST_SUBVOLUME_PREFIX="subvolume"
TEST_GROUP_PREFIX="group"
self.mount_a.run_shell(['rmdir', trashpath])
def setUp(self):
- super(TestVolumes, self).setUp()
+ super(TestVolumesHelper, self).setUp()
self.volname = None
self.vol_created = False
self._enable_multi_fs()
def tearDown(self):
if self.vol_created:
self._delete_test_volume()
- super(TestVolumes, self).tearDown()
+ super(TestVolumesHelper, self).tearDown()
- def test_connection_expiration(self):
- # unmount any cephfs mounts
- self.mount_a.umount_wait()
- sessions = self._session_list()
- self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
-
- # Get the mgr to definitely mount cephfs
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
- sessions = self._session_list()
- self.assertEqual(len(sessions), 1)
-
- # Now wait for the mgr to expire the connection:
- self.wait_until_evicted(sessions[0]['id'], timeout=90)
+class TestVolumes(TestVolumesHelper):
+ """Tests for FS volume operations."""
def test_volume_create(self):
"""
That the volume can be created and then cleans up
else:
raise RuntimeError("expected the 'fs volume rm' command to fail.")
- def test_subvolume_marked(self):
- """
- ensure a subvolume is marked with the ceph.dir.subvolume xattr
- """
- subvolume = self._generate_random_subvolume_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
- # getpath
- subvolpath = self._get_subvolume_path(self.volname, subvolume)
-
- # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
- # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
- # outside the subvolume
- dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
- srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
- rename_script = dedent("""
- import os
- import errno
- try:
- os.rename("{src}", "{dst}")
- except OSError as e:
- if e.errno != errno.EXDEV:
- raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
- else:
- raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
- """)
- self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath))
-
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
def test_volume_rm_arbitrary_pool_removal(self):
"""
That the arbitrary pool added to the volume out of band is removed
self.assertNotIn(pool["name"], pools,
"pool {0} exists after volume removal".format(pool["name"]))
- ### basic subvolume operations
- def test_subvolume_create_and_rm(self):
- # create subvolume
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+class TestSubvolumeGroups(TestVolumesHelper):
+ """Tests for FS subvolume group operations."""
+ def test_default_uid_gid_subvolume_group(self):
+ group = self._generate_random_group_name()
+ expected_uid = 0
+ expected_gid = 0
- # make sure it exists
- subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
- self.assertNotEqual(subvolpath, None)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+ group_path = self._get_subvolume_group_path(self.volname, group)
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- # make sure its gone
+ # check group's uid and gid
+ stat = self.mount_a.stat(group_path)
+ self.assertEqual(stat['st_uid'], expected_uid)
+ self.assertEqual(stat['st_gid'], expected_gid)
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_nonexistent_subvolume_group_create(self):
+ subvolume = self._generate_random_subvolume_name()
+ group = "non_existent_group"
+
+ # try creating subvolume in a nonexistent group
try:
- self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
except CommandFailedError as ce:
if ce.exitstatus != errno.ENOENT:
raise
else:
- raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")
+ raise RuntimeError("expected the 'fs subvolume create' command to fail")
- # verify trash dir is clean
- self._wait_for_trash_empty()
+ def test_nonexistent_subvolume_group_rm(self):
+ group = "non_existent_group"
- def test_subvolume_expand(self):
- """
- That a subvolume can be expanded in size and its quota matches the expected size.
- """
+ # try removing subvolume group
+ try:
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOENT:
+ raise
+ else:
+ raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- osize = self.DEFAULT_FILE_SIZE*1024*1024
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
+ group = self._generate_random_group_name()
+ data_pool = "invalid_pool"
+ # create group with invalid data pool layout
+ with self.assertRaises(CommandFailedError):
+ self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ # check whether group path is cleaned up
+ try:
+ self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOENT:
+ raise
+ else:
+ raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
- # expand the subvolume
- nsize = osize*2
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ def test_subvolume_group_create_with_desired_data_pool_layout(self):
+ group1, group2 = self._generate_random_group_name(2)
- # verify the quota
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, nsize)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group1)
+ group1_path = self._get_subvolume_group_path(self.volname, group1)
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
+ new_pool = "new_pool"
+ self.assertNotEqual(default_pool, new_pool)
- # verify trash dir is clean
- self._wait_for_trash_empty()
+ # add data pool
+ self.fs.add_data_pool(new_pool)
- def test_subvolume_shrink(self):
- """
- That a subvolume can be shrinked in size and its quota matches the expected size.
- """
+ # create group specifying the new data pool as its pool layout
+ self._fs_cmd("subvolumegroup", "create", self.volname, group2,
+ "--pool_layout", new_pool)
+ group2_path = self._get_subvolume_group_path(self.volname, group2)
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- osize = self.DEFAULT_FILE_SIZE*1024*1024
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
+ self.assertEqual(desired_pool, new_pool)
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
- # shrink the subvolume
- nsize = osize // 2
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ def test_subvolume_group_create_with_desired_mode(self):
+ group1, group2 = self._generate_random_group_name(2)
+ # default mode
+ expected_mode1 = "755"
+ # desired mode
+ expected_mode2 = "777"
- # verify the quota
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, nsize)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group1)
+ self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777")
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ group1_path = self._get_subvolume_group_path(self.volname, group1)
+ group2_path = self._get_subvolume_group_path(self.volname, group2)
- # verify trash dir is clean
- self._wait_for_trash_empty()
+ # check group's mode
+ actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
+ actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
+ self.assertEqual(actual_mode1, expected_mode1)
+ self.assertEqual(actual_mode2, expected_mode2)
- def test_subvolume_resize_fail_invalid_size(self):
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
+
+ def test_subvolume_group_create_with_desired_uid_gid(self):
"""
- That a subvolume cannot be resized to an invalid size and the quota did not change
+ That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
+ expected values.
"""
+ uid = 1000
+ gid = 1000
- osize = self.DEFAULT_FILE_SIZE*1024*1024
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ # create subvolume group
+ subvolgroupname = self._generate_random_group_name()
+ self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))
# make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
+ self.assertNotEqual(subvolgrouppath, None)
- # try to resize the subvolume with an invalid size -10
- nsize = -10
+ # verify the uid and gid
+ suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
+ sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
+ self.assertEqual(uid, suid)
+ self.assertEqual(gid, sgid)
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
+
+ def test_subvolume_group_create_with_invalid_data_pool_layout(self):
+ group = self._generate_random_group_name()
+ data_pool = "invalid_pool"
+ # create group with invalid data pool layout
try:
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
+ if ce.exitstatus != errno.EINVAL:
+ raise
else:
- self.fail("expected the 'fs subvolume resize' command to fail")
+ raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
- # verify the quota did not change
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, osize)
+ def test_subvolume_group_ls(self):
+ # tests the 'fs subvolumegroup ls' command
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ subvolumegroups = []
- # verify trash dir is clean
- self._wait_for_trash_empty()
+ # create subvolumegroups
+ subvolumegroups = self._generate_random_group_name(3)
+ for groupname in subvolumegroups:
+ self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
- def test_subvolume_resize_fail_zero_size(self):
- """
- That a subvolume cannot be resized to a zero size and the quota did not change
- """
+ subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
+ if len(subvolumegroupls) == 0:
+ raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
+ else:
+ subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
+ if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
+ raise RuntimeError("Error creating or listing subvolume groups")
- osize = self.DEFAULT_FILE_SIZE*1024*1024
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ def test_subvolume_group_ls_for_nonexistent_volume(self):
+ # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
+ # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ # list subvolume groups
+ subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
+ if len(subvolumegroupls) > 0:
+ raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
- # try to resize the subvolume with size 0
- nsize = 0
- try:
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
- else:
- self.fail("expected the 'fs subvolume resize' command to fail")
+ def test_subvolumegroup_pin_distributed(self):
+ self.fs.set_max_mds(2)
+ status = self.fs.wait_for_daemons()
+ self.config_set('mds', 'mds_export_ephemeral_distributed', True)
- # verify the quota did not change
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, osize)
+ group = "pinme"
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+ self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
+ # (no effect on distribution) pin the group directory to 0 so rank 0 has all subtree bounds visible
+ self._fs_cmd("subvolumegroup", "pin", self.volname, group, "export", "0")
+ subvolumes = self._generate_random_subvolume_name(10)
+ for subvolume in subvolumes:
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
+ self._wait_distributed_subtrees(10, status=status)
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ # remove subvolumes
+ for subvolume in subvolumes:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_resize_quota_lt_used_size(self):
- """
- That a subvolume can be resized to a size smaller than the current used size
- and the resulting quota matches the expected size.
- """
+ def test_subvolume_group_rm_force(self):
+ # test removing non-existing subvolume group with --force
+ group = self._generate_random_group_name()
+ try:
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
+ except CommandFailedError:
+ raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
- osize = self.DEFAULT_FILE_SIZE*1024*1024*20
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+class TestSubvolumes(TestVolumesHelper):
+ """Tests for FS subvolume operations, except snapshot and snapshot clone."""
+ def test_async_subvolume_rm(self):
+ subvolumes = self._generate_random_subvolume_name(100)
- # create one file of 10MB
- file_size=self.DEFAULT_FILE_SIZE*10
- number_of_files=1
- log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
- number_of_files,
- file_size))
- filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
+ # create subvolumes
+ for subvolume in subvolumes:
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._do_subvolume_io(subvolume, number_of_files=10)
- usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
- susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
- self.assertEqual(usedsize, susedsize)
+ self.mount_a.umount_wait()
- # shrink the subvolume
- nsize = usedsize // 2
- try:
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- except CommandFailedError:
- self.fail("expected the 'fs subvolume resize' command to succeed")
+ # remove subvolumes
+ for subvolume in subvolumes:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- # verify the quota
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, nsize)
+ self.mount_a.mount_wait()
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty(timeout=300)
+
+ def test_default_uid_gid_subvolume(self):
+ subvolume = self._generate_random_subvolume_name()
+ expected_uid = 0
+ expected_gid = 0
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ subvol_path = self._get_subvolume_path(self.volname, subvolume)
+
+ # check subvolume's uid and gid
+ stat = self.mount_a.stat(subvol_path)
+ self.assertEqual(stat['st_uid'], expected_uid)
+ self.assertEqual(stat['st_gid'], expected_gid)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
+ def test_nonexistent_subvolume_rm(self):
+ # remove non-existing subvolume
+ subvolume = "non_existent_subvolume"
- def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
- """
- That a subvolume cannot be resized to a size smaller than the current used size
- when --no_shrink is given and the quota did not change.
- """
+ # try removing subvolume
+ try:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOENT:
+ raise
+ else:
+ raise RuntimeError("expected the 'fs subvolume rm' command to fail")
- osize = self.DEFAULT_FILE_SIZE*1024*1024*20
+ def test_subvolume_create_and_rm(self):
# create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ subvolume = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
# make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
self.assertNotEqual(subvolpath, None)
- # create one file of 10MB
- file_size=self.DEFAULT_FILE_SIZE*10
- number_of_files=1
- log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
- number_of_files,
- file_size))
- filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
-
- usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
- susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
- self.assertEqual(usedsize, susedsize)
-
- # shrink the subvolume
- nsize = usedsize // 2
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ # make sure it's gone
try:
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
+ self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
+ if ce.exitstatus != errno.ENOENT:
+ raise
else:
- self.fail("expected the 'fs subvolume resize' command to fail")
-
- # verify the quota did not change
- size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
- self.assertEqual(size, osize)
-
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_resize_expand_on_full_subvolume(self):
- """
- That the subvolume can be expanded from a full subvolume and future writes succeed.
- """
-
- osize = self.DEFAULT_FILE_SIZE*1024*1024*10
- # create subvolume of quota 10MB and make sure it exists
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ def test_subvolume_create_and_rm_in_group(self):
+ subvolume = self._generate_random_subvolume_name()
+ group = self._generate_random_group_name()
- # create one file of size 10MB and write
- file_size=self.DEFAULT_FILE_SIZE*10
- number_of_files=1
- log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
- number_of_files,
- file_size))
- filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
- # create a file of size 5MB and try write more
- file_size=file_size // 2
- number_of_files=1
- log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
- number_of_files,
- file_size))
- filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
- try:
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- except CommandFailedError:
- # Not able to write. So expand the subvolume more and try writing the 5MB file again
- nsize = osize*2
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- try:
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- except CommandFailedError:
- self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
- "to succeed".format(subvolname, number_of_files, file_size))
- else:
- self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
- "to fail".format(subvolname, number_of_files, file_size))
+ # create subvolume in group
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
# verify trash dir is clean
self._wait_for_trash_empty()
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
def test_subvolume_create_idempotence(self):
# create subvolume
subvolume = self._generate_random_subvolume_name()
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_pin_export(self):
- self.fs.set_max_mds(2)
- status = self.fs.wait_for_daemons()
+ def test_subvolume_create_isolated_namespace(self):
+ """
+ Create subvolume in separate rados namespace
+ """
+ # create subvolume
subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
- self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
- path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
- path = os.path.dirname(path) # get subvolume path
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
- self._get_subtrees(status=status, rank=1)
- self._wait_subtrees([(path, 1)], status=status)
+ # get subvolume metadata
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ self.assertNotEqual(len(subvol_info), 0)
+ self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
- # remove subvolume
+ # remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolumegroup_pin_distributed(self):
- self.fs.set_max_mds(2)
- status = self.fs.wait_for_daemons()
- self.config_set('mds', 'mds_export_ephemeral_distributed', True)
+ def test_subvolume_create_with_auto_cleanup_on_fail(self):
+ subvolume = self._generate_random_subvolume_name()
+ data_pool = "invalid_pool"
+ # create subvolume with invalid data pool layout fails
+ with self.assertRaises(CommandFailedError):
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
- group = "pinme"
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
- self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
- # (no effect on distribution) pin the group directory to 0 so rank 0 has all subtree bounds visible
- self._fs_cmd("subvolumegroup", "pin", self.volname, group, "export", "0")
- subvolumes = self._generate_random_subvolume_name(10)
- for subvolume in subvolumes:
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
- self._wait_distributed_subtrees(10, status=status)
+ # check whether subvol path is cleaned up
+ try:
+ self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
+ else:
+ self.fail("expected the 'fs subvolume getpath' command to fail")
- # remove subvolumes
- for subvolume in subvolumes:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
+ subvol1, subvol2 = self._generate_random_subvolume_name(2)
+ group = self._generate_random_group_name()
+
+ # create group. this also helps set default pool layout for subvolumes
+ # created within the group.
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+ # create subvolume in group.
+ self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
+ subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
+
+ default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
+ new_pool = "new_pool"
+ self.assertNotEqual(default_pool, new_pool)
+
+ # add data pool
+ self.fs.add_data_pool(new_pool)
+
+ # create subvolume specifying the new data pool as its pool layout
+ self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
+ "--pool_layout", new_pool)
+ subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
+
+ desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
+ self.assertEqual(desired_pool, new_pool)
+
+ self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_pin_random(self):
- self.fs.set_max_mds(2)
- self.fs.wait_for_daemons()
- self.config_set('mds', 'mds_export_ephemeral_random', True)
+ def test_subvolume_create_with_desired_mode_in_group(self):
+ subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
- self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
- # no verification
+ group = self._generate_random_group_name()
+ # default mode
+ expected_mode1 = "755"
+ # desired mode
+ expected_mode2 = "777"
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+ # create subvolume in group
+ self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
+ self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
+ # check whether mode 0777 also works
+ self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
+
+ subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
+ subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
+ subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
+
+ # check subvolume's mode
+ actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
+ actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
+ actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
+ self.assertEqual(actual_mode1, expected_mode1)
+ self.assertEqual(actual_mode2, expected_mode2)
+ self.assertEqual(actual_mode3, expected_mode2)
+
+ self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_create_isolated_namespace(self):
+ def test_subvolume_create_with_desired_uid_gid(self):
"""
- Create subvolume in separate rados namespace
+ That the subvolume can be created with the desired uid and gid and its uid and gid matches the
+ expected values.
"""
+ uid = 1000
+ gid = 1000
# create subvolume
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
- # get subvolume metadata
- subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
- self.assertNotEqual(len(subvol_info), 0)
- self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ # verify the uid and gid
+ suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
+ sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
+ self.assertEqual(uid, suid)
+ self.assertEqual(gid, sgid)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_rm_force(self):
- # test removing non-existing subvolume with --force
- subvolume = self._generate_random_subvolume_name()
- try:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
- except CommandFailedError:
- self.fail("expected the 'fs subvolume rm --force' command to succeed")
-
- def test_subvolume_create_with_auto_cleanup_on_fail(self):
- subvolume = self._generate_random_subvolume_name()
- data_pool = "invalid_pool"
- # create subvolume with invalid data pool layout fails
- with self.assertRaises(CommandFailedError):
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
-
- # check whether subvol path is cleaned up
- try:
- self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
- else:
- self.fail("expected the 'fs subvolume getpath' command to fail")
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
def test_subvolume_create_with_invalid_size(self):
# create subvolume with an invalid size -1
subvolume = self._generate_random_subvolume_name()
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_nonexistent_subvolume_rm(self):
- # remove non-existing subvolume
- subvolume = "non_existent_subvolume"
+ def test_subvolume_expand(self):
+ """
+ That a subvolume can be expanded in size and its quota matches the expected size.
+ """
- # try, remove subvolume
- try:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolume rm' command to fail")
+ # create subvolume
+ subvolname = self._generate_random_subvolume_name()
+ osize = self.DEFAULT_FILE_SIZE*1024*1024
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- def test_nonexistent_subvolume_group_create(self):
- subvolume = self._generate_random_subvolume_name()
- group = "non_existent_group"
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # try, creating subvolume in a nonexistent group
- try:
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolume create' command to fail")
+ # expand the subvolume
+ nsize = osize*2
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- def test_default_uid_gid_subvolume(self):
- subvolume = self._generate_random_subvolume_name()
- expected_uid = 0
- expected_gid = 0
+ # verify the quota
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, nsize)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_info(self):
+ # tests the 'fs subvolume info' command
+
+ subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
+ "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
+ "type", "uid", "features", "state"]
# create subvolume
+ subvolume = self._generate_random_subvolume_name()
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- subvol_path = self._get_subvolume_path(self.volname, subvolume)
- # check subvolume's uid and gid
- stat = self.mount_a.stat(subvol_path)
- self.assertEqual(stat['st_uid'], expected_uid)
- self.assertEqual(stat['st_gid'], expected_gid)
+ # get subvolume metadata
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
- # remove subvolume
+ self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
+ self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
+ self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
+ self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
+
+ self.assertEqual(len(subvol_info["features"]), 3,
+ msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
+ for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
+ self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
+
+ nsize = self.DEFAULT_FILE_SIZE*1024*1024
+ self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
+
+ # get subvolume metadata after quota set
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+
+ self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
+ self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
+ self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
+ self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
+
+ self.assertEqual(len(subvol_info["features"]), 3,
+ msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
+ for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
+ self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
+
+ # remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
if len(subvolumels) > 0:
raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
- def test_subvolume_resize_infinite_size(self):
+ def test_subvolume_marked(self):
"""
- That a subvolume can be resized to an infinite size by unsetting its quota.
+ ensure a subvolume is marked with the ceph.dir.subvolume xattr
"""
+ subvolume = self._generate_random_subvolume_name()
# create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
- str(self.DEFAULT_FILE_SIZE*1024*1024))
-
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # resize inf
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
+ # getpath
+ subvolpath = self._get_subvolume_path(self.volname, subvolume)
- # verify that the quota is None
- size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
- self.assertEqual(size, None)
-
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_resize_infinite_size_future_writes(self):
- """
- That a subvolume can be resized to an infinite size and the future writes succeed.
- """
-
- # create subvolume
- subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
- str(self.DEFAULT_FILE_SIZE*1024*1024*5))
-
- # make sure it exists
- subvolpath = self._get_subvolume_path(self.volname, subvolname)
- self.assertNotEqual(subvolpath, None)
-
- # resize inf
- self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
-
- # verify that the quota is None
- size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
- self.assertEqual(size, None)
-
- # create one file of 10MB and try to write
- file_size=self.DEFAULT_FILE_SIZE*10
- number_of_files=1
- log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
- number_of_files,
- file_size))
- filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
-
- try:
- self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- except CommandFailedError:
- self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
- "to succeed".format(subvolname, number_of_files, file_size))
+ # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
+ # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
+ # outside the subvolume
+ dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
+ srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
+ rename_script = dedent("""
+ import os
+ import errno
+ try:
+ os.rename("{src}", "{dst}")
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
+ else:
+ raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
+ """)
+ self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath))
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_info(self):
- # tests the 'fs subvolume info' command
-
- subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
- "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
- "type", "uid", "features", "state"]
-
- # create subvolume
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
- # get subvolume metadata
- subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
- for md in subvol_md:
- self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
-
- self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
- self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
- self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
- self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
-
- self.assertEqual(len(subvol_info["features"]), 3,
- msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
- for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
- self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
-
- nsize = self.DEFAULT_FILE_SIZE*1024*1024
- self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
-
- # get subvolume metadata after quota set
- subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
- for md in subvol_md:
- self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
-
- self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
- self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
- self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
- self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
-
- self.assertEqual(len(subvol_info["features"]), 3,
- msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
- for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
- self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
-
- # remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_clone_subvolume_info(self):
-
- # tests the 'fs subvolume info' command for a clone
- subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
- "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
- "type", "uid"]
+ def test_subvolume_pin_export(self):
+ self.fs.set_max_mds(2)
+ status = self.fs.wait_for_daemons()
subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
-
- # create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
+ path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
+ path = os.path.dirname(path) # get subvolume path
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=1)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
-
- # check clone status
- self._wait_for_clone_to_complete(clone)
-
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
-
- subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
- if len(subvol_info) == 0:
- raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
- for md in subvol_md:
- if md not in subvol_info.keys():
- raise RuntimeError("%s not present in the metadata of subvolume" % md)
- if subvol_info["type"] != "clone":
- raise RuntimeError("type should be set to clone")
+ self._get_subtrees(status=status, rank=1)
+ self._wait_subtrees([(path, 1)], status=status)
- # remove subvolumes
+ # remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
+ def test_subvolume_pin_random(self):
+ self.fs.set_max_mds(2)
+ self.fs.wait_for_daemons()
+ self.config_set('mds', 'mds_export_ephemeral_random', True)
- ### subvolume group operations
-
- def test_subvolume_create_and_rm_in_group(self):
subvolume = self._generate_random_subvolume_name()
- group = self._generate_random_group_name()
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # create subvolume in group
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
+ # no verification
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- def test_subvolume_group_create_with_desired_data_pool_layout(self):
- group1, group2 = self._generate_random_group_name(2)
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group1)
- group1_path = self._get_subvolume_group_path(self.volname, group1)
-
- default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
- new_pool = "new_pool"
- self.assertNotEqual(default_pool, new_pool)
-
- # add data pool
- self.fs.add_data_pool(new_pool)
-
- # create group specifying the new data pool as its pool layout
- self._fs_cmd("subvolumegroup", "create", self.volname, group2,
- "--pool_layout", new_pool)
- group2_path = self._get_subvolume_group_path(self.volname, group2)
-
- desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
- self.assertEqual(desired_pool, new_pool)
-
- self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
- self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
-
- def test_subvolume_group_create_with_invalid_data_pool_layout(self):
- group = self._generate_random_group_name()
- data_pool = "invalid_pool"
- # create group with invalid data pool layout
- try:
- self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.EINVAL:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
+ def test_subvolume_resize_fail_invalid_size(self):
+ """
+ That a subvolume cannot be resized to an invalid size and the quota did not change
+ """
- def test_subvolume_group_rm_force(self):
- # test removing non-existing subvolume group with --force
- group = self._generate_random_group_name()
- try:
- self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
- except CommandFailedError:
- raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
+ osize = self.DEFAULT_FILE_SIZE*1024*1024
+ # create subvolume
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
- group = self._generate_random_group_name()
- data_pool = "invalid_pool"
- # create group with invalid data pool layout
- with self.assertRaises(CommandFailedError):
- self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # check whether group path is cleaned up
+ # try to resize the subvolume with an invalid size -10
+ nsize = -10
try:
- self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
+ self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
else:
- raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
-
- def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
- subvol1, subvol2 = self._generate_random_subvolume_name(2)
- group = self._generate_random_group_name()
-
- # create group. this also helps set default pool layout for subvolumes
- # created within the group.
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # create subvolume in group.
- self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
- subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
-
- default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
- new_pool = "new_pool"
- self.assertNotEqual(default_pool, new_pool)
-
- # add data pool
- self.fs.add_data_pool(new_pool)
-
- # create subvolume specifying the new data pool as its pool layout
- self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
- "--pool_layout", new_pool)
- subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
+ self.fail("expected the 'fs subvolume resize' command to fail")
- desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
- self.assertEqual(desired_pool, new_pool)
+ # verify the quota did not change
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, osize)
- self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
- self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_group_create_with_desired_mode(self):
- group1, group2 = self._generate_random_group_name(2)
- # default mode
- expected_mode1 = "755"
- # desired mode
- expected_mode2 = "777"
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group1)
- self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777")
-
- group1_path = self._get_subvolume_group_path(self.volname, group1)
- group2_path = self._get_subvolume_group_path(self.volname, group2)
-
- # check group's mode
- actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
- actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
- self.assertEqual(actual_mode1, expected_mode1)
- self.assertEqual(actual_mode2, expected_mode2)
-
- self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
- self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
-
- def test_subvolume_group_create_with_desired_uid_gid(self):
+ def test_subvolume_resize_fail_zero_size(self):
"""
- That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
- expected values.
+ That a subvolume cannot be resized to a zero size and the quota did not change
"""
- uid = 1000
- gid = 1000
- # create subvolume group
- subvolgroupname = self._generate_random_group_name()
- self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))
+ osize = self.DEFAULT_FILE_SIZE*1024*1024
+ # create subvolume
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
# make sure it exists
- subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
- self.assertNotEqual(subvolgrouppath, None)
-
- # verify the uid and gid
- suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
- sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
- self.assertEqual(uid, suid)
- self.assertEqual(gid, sgid)
-
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
-
- def test_subvolume_create_with_desired_mode_in_group(self):
- subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
-
- group = self._generate_random_group_name()
- # default mode
- expected_mode1 = "755"
- # desired mode
- expected_mode2 = "777"
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # create subvolume in group
- self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
- self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
- # check whether mode 0777 also works
- self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
- subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
- subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
+ # try to resize the subvolume with size 0
+ nsize = 0
+ try:
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
+ else:
+ self.fail("expected the 'fs subvolume resize' command to fail")
- # check subvolume's mode
- actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
- actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
- actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
- self.assertEqual(actual_mode1, expected_mode1)
- self.assertEqual(actual_mode2, expected_mode2)
- self.assertEqual(actual_mode3, expected_mode2)
+ # verify the quota did not change
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, osize)
- self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
- self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
- self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_create_with_desired_uid_gid(self):
+ def test_subvolume_resize_quota_lt_used_size(self):
"""
- That the subvolume can be created with the desired uid and gid and its uid and gid matches the
- expected values.
+ That a subvolume can be resized to a size smaller than the current used size
+ and the resulting quota matches the expected size.
"""
- uid = 1000
- gid = 1000
+ osize = self.DEFAULT_FILE_SIZE*1024*1024*20
# create subvolume
subvolname = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
# make sure it exists
subvolpath = self._get_subvolume_path(self.volname, subvolname)
self.assertNotEqual(subvolpath, None)
- # verify the uid and gid
- suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
- sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
- self.assertEqual(uid, suid)
- self.assertEqual(gid, sgid)
-
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolname)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
+ # create one file of 10MB
+ file_size=self.DEFAULT_FILE_SIZE*10
+ number_of_files=1
+ log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
+ number_of_files,
+ file_size))
+ filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- def test_nonexistent_subvolume_group_rm(self):
- group = "non_existent_group"
+ usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
+ susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
+ self.assertEqual(usedsize, susedsize)
- # try, remove subvolume group
+ # shrink the subvolume
+ nsize = usedsize // 2
try:
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
-
- def test_default_uid_gid_subvolume_group(self):
- group = self._generate_random_group_name()
- expected_uid = 0
- expected_gid = 0
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
- group_path = self._get_subvolume_group_path(self.volname, group)
-
- # check group's uid and gid
- stat = self.mount_a.stat(group_path)
- self.assertEqual(stat['st_uid'], expected_uid)
- self.assertEqual(stat['st_gid'], expected_gid)
-
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- def test_subvolume_group_ls(self):
- # tests the 'fs subvolumegroup ls' command
-
- subvolumegroups = []
-
- #create subvolumegroups
- subvolumegroups = self._generate_random_group_name(3)
- for groupname in subvolumegroups:
- self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
-
- subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
- if len(subvolumegroupls) == 0:
- raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
- else:
- subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
- if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
- raise RuntimeError("Error creating or listing subvolume groups")
-
- def test_subvolume_group_ls_for_nonexistent_volume(self):
- # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
- # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
-
- # list subvolume groups
- subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
- if len(subvolumegroupls) > 0:
- raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
-
- ### snapshot operations
-
- def test_subvolume_snapshot_create_and_rm(self):
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ except CommandFailedError:
+ self.fail("expected the 'fs subvolume resize' command to succeed")
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # verify the quota
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, nsize)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_info(self):
-
+ def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
"""
- tests the 'fs subvolume snapshot info' command
+ That a subvolume cannot be resized to a size smaller than the current used size
+ when --no_shrink is given and the quota did not change.
"""
- snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
-
- subvolume = self._generate_random_subvolume_name()
- snapshot, snap_missing = self._generate_random_snapshot_name(2)
-
+ osize = self.DEFAULT_FILE_SIZE*1024*1024*20
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=1)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # create one file of 10MB
+ file_size=self.DEFAULT_FILE_SIZE*10
+ number_of_files=1
+ log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
+ number_of_files,
+ file_size))
+ filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
- for md in snap_md:
- self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
- self.assertEqual(snap_info["has_pending_clones"], "no")
+ usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
+ susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
+ self.assertEqual(usedsize, susedsize)
- # snapshot info for non-existent snapshot
+ # shrink the subvolume
+ nsize = usedsize // 2
try:
- self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
+ self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on shrink of subvolume with quota less than used size and --no_shrink")
else:
- self.fail("expected snapshot info of non-existent snapshot to fail")
+ self.fail("expected the 'fs subvolume resize' command to fail")
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # verify the quota did not change
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, osize)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_create_idempotence(self):
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ def test_subvolume_resize_expand_on_full_subvolume(self):
+ """
+ That the subvolume can be expanded from a full subvolume and future writes succeed.
+ """
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ osize = self.DEFAULT_FILE_SIZE*1024*1024*10
+ # create subvolume of quota 10MB and make sure it exists
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # try creating w/ same subvolume snapshot name -- should be idempotent
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # create one file of size 10MB and write
+ file_size=self.DEFAULT_FILE_SIZE*10
+ number_of_files=1
+ log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
+ number_of_files,
+ file_size))
+ filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # create a file of size 5MB and try write more
+ file_size=file_size // 2
+ number_of_files=1
+ log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
+ number_of_files,
+ file_size))
+ filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
+ try:
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
+ except CommandFailedError:
+ # Not able to write. So expand the subvolume more and try writing the 5MB file again
+ nsize = osize*2
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
+ try:
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
+ except CommandFailedError:
+ self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
+ "to succeed".format(subvolname, number_of_files, file_size))
+ else:
+ self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
+ "to fail".format(subvolname, number_of_files, file_size))
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_nonexistent_subvolume_snapshot_rm(self):
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
+ def test_subvolume_resize_infinite_size(self):
+ """
+ That a subvolume can be resized to an infinite size by unsetting its quota.
+ """
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
+ str(self.DEFAULT_FILE_SIZE*1024*1024))
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # resize inf
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
- # remove snapshot again
- try:
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
+ # verify that the quota is None
+ size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
+ self.assertEqual(size, None)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_snapshot_rm_force(self):
- # test removing non existing subvolume snapshot with --force
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
+ self._wait_for_trash_empty()
- # remove snapshot
- try:
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
- except CommandFailedError:
- raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
+ def test_subvolume_resize_infinite_size_future_writes(self):
+ """
+ That a subvolume can be resized to an infinite size and the future writes succeed.
+ """
- def test_subvolume_snapshot_in_group(self):
- subvolume = self._generate_random_subvolume_name()
- group = self._generate_random_group_name()
- snapshot = self._generate_random_snapshot_name()
+ # create subvolume
+ subvolname = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
+ str(self.DEFAULT_FILE_SIZE*1024*1024*5))
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- # create subvolume in group
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
+ # resize inf
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
- # snapshot subvolume in group
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
+ # verify that the quota is None
+ size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
+ self.assertEqual(size, None)
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
+ # create one file of 10MB and try to write
+ file_size=self.DEFAULT_FILE_SIZE*10
+ number_of_files=1
+ log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
+ number_of_files,
+ file_size))
+ filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
+
+ try:
+ self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
+ except CommandFailedError:
+ self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
+ "to succeed".format(subvolname, number_of_files, file_size))
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- def test_subvolume_snapshot_ls(self):
- # tests the 'fs subvolume snapshot ls' command
+ def test_subvolume_rm_force(self):
+ # test removing non-existing subvolume with --force
+ subvolume = self._generate_random_subvolume_name()
+ try:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
+ except CommandFailedError:
+ self.fail("expected the 'fs subvolume rm --force' command to succeed")
- snapshots = []
+ def test_subvolume_shrink(self):
+ """
+ That a subvolume can be shrunk in size and its quota matches the expected size.
+ """
# create subvolume
- subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ subvolname = self._generate_random_subvolume_name()
+ osize = self.DEFAULT_FILE_SIZE*1024*1024
+ self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
- # create subvolume snapshots
- snapshots = self._generate_random_snapshot_name(3)
- for snapshot in snapshots:
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # make sure it exists
+ subvolpath = self._get_subvolume_path(self.volname, subvolname)
+ self.assertNotEqual(subvolpath, None)
- subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
- if len(subvolsnapshotls) == 0:
- self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
- else:
- snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
- if collections.Counter(snapshotnames) != collections.Counter(snapshots):
- self.fail("Error creating or listing subvolume snapshots")
+ # shrink the subvolume
+ nsize = osize // 2
+ self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- # remove snapshot
- for snapshot in snapshots:
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # verify the quota
+ size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
+ self.assertEqual(size, nsize)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolname)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_group_snapshot_unsupported_status(self):
- group = self._generate_random_group_name()
- snapshot = self._generate_random_snapshot_name()
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # snapshot group
- try:
- self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
- else:
- self.fail("expected subvolumegroup snapshot create command to fail")
-
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+class TestSubvolumeGroupSnapshots(TestVolumesHelper):
+ """Tests for FS subvolume group snapshot operations."""
@unittest.skip("skipping subvolumegroup snapshot tests")
- def test_subvolume_group_snapshot_create_and_rm(self):
+ def test_nonexistent_subvolume_group_snapshot_rm(self):
subvolume = self._generate_random_subvolume_name()
group = self._generate_random_group_name()
snapshot = self._generate_random_snapshot_name()
# remove snapshot
self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
+ # remove snapshot
+ try:
+ self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOENT:
+ raise
+ else:
+ raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
+
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@unittest.skip("skipping subvolumegroup snapshot tests")
- def test_subvolume_group_snapshot_idempotence(self):
+ def test_subvolume_group_snapshot_create_and_rm(self):
subvolume = self._generate_random_subvolume_name()
group = self._generate_random_group_name()
snapshot = self._generate_random_snapshot_name()
# snapshot group
self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
- # try creating snapshot w/ same snapshot name -- shoule be idempotent
- self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
-
# remove snapshot
self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@unittest.skip("skipping subvolumegroup snapshot tests")
- def test_nonexistent_subvolume_group_snapshot_rm(self):
+ def test_subvolume_group_snapshot_idempotence(self):
subvolume = self._generate_random_subvolume_name()
group = self._generate_random_group_name()
snapshot = self._generate_random_snapshot_name()
# snapshot group
self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
- # remove snapshot
- self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
+ # try creating snapshot w/ same snapshot name -- should be idempotent
+ self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
# remove snapshot
- try:
- self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOENT:
- raise
- else:
- raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
+ self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
# remove group
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
- @unittest.skip("skipping subvolumegroup snapshot tests")
- def test_subvolume_group_snapshot_rm_force(self):
- # test removing non-existing subvolume group snapshot with --force
- group = self._generate_random_group_name()
- snapshot = self._generate_random_snapshot_name()
- # remove snapshot
- try:
- self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
- except CommandFailedError:
- raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
-
@unittest.skip("skipping subvolumegroup snapshot tests")
def test_subvolume_group_snapshot_ls(self):
# tests the 'fs subvolumegroup snapshot ls' command
if collections.Counter(snapshotnames) != collections.Counter(snapshots):
raise RuntimeError("Error creating or listing subvolume group snapshots")
- def test_async_subvolume_rm(self):
- subvolumes = self._generate_random_subvolume_name(100)
-
- # create subvolumes
- for subvolume in subvolumes:
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
- self._do_subvolume_io(subvolume, number_of_files=10)
+ @unittest.skip("skipping subvolumegroup snapshot tests")
+ def test_subvolume_group_snapshot_rm_force(self):
+ # test removing non-existing subvolume group snapshot with --force
+ group = self._generate_random_group_name()
+ snapshot = self._generate_random_snapshot_name()
+ # remove snapshot
+ try:
+ self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
+ except CommandFailedError:
+ raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
- self.mount_a.umount_wait()
+ def test_subvolume_group_snapshot_unsupported_status(self):
+ group = self._generate_random_group_name()
+ snapshot = self._generate_random_snapshot_name()
- # remove subvolumes
- for subvolume in subvolumes:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
- self.mount_a.mount_wait()
+ # snapshot group
+ try:
+ self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
+ else:
+ self.fail("expected subvolumegroup snapshot create command to fail")
- # verify trash dir is clean
- self._wait_for_trash_empty(timeout=300)
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
- def test_mgr_eviction(self):
- # unmount any cephfs mounts
- self.mount_a.umount_wait()
- sessions = self._session_list()
- self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
- # Get the mgr to definitely mount cephfs
+class TestSubvolumeSnapshots(TestVolumesHelper):
+ """Tests for FS subvolume snapshot operations."""
+ def test_nonexistent_subvolume_snapshot_rm(self):
subvolume = self._generate_random_subvolume_name()
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
- sessions = self._session_list()
- self.assertEqual(len(sessions), 1)
-
- # Now fail the mgr, check the session was evicted
- mgr = self.mgr_cluster.get_active_id()
- self.mgr_cluster.mgr_fail(mgr)
- self.wait_until_evicted(sessions[0]['id'])
-
- def test_subvolume_upgrade_legacy_to_v1(self):
- """
- poor man's upgrade test -- rather than going through a full upgrade cycle,
- emulate subvolumes by going through the wormhole and verify if they are
- accessible.
- further ensure that a legacy volume is not updated to v2.
- """
- subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
- group = self._generate_random_group_name()
-
- # emulate a old-fashioned subvolume -- one in the default group and
- # the other in a custom group
- createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
- self.mount_a.run_shell(['mkdir', '-p', createpath1])
-
- # create group
- createpath2 = os.path.join(".", "volumes", group, subvolume2)
- self.mount_a.run_shell(['mkdir', '-p', createpath2])
+ snapshot = self._generate_random_snapshot_name()
- # this would auto-upgrade on access without anyone noticing
- subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
- self.assertNotEqual(subvolpath1, None)
- subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
- subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
- self.assertNotEqual(subvolpath2, None)
- subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # and... the subvolume path returned should be what we created behind the scene
- self.assertEqual(createpath1[1:], subvolpath1)
- self.assertEqual(createpath2[1:], subvolpath2)
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # ensure metadata file is in legacy location, with required version v1
- self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
- self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
+ # remove snapshot again
+ try:
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOENT:
+ raise
+ else:
+ raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
- self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- def test_subvolume_no_upgrade_v1_sanity(self):
- """
- poor man's upgrade test -- theme continues...
-
- This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
- a series of operations on the v1 subvolume to ensure they work as expected.
- """
- subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
- "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
- "type", "uid", "features", "state"]
- snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
-
+ def test_subvolume_snapshot_create_and_rm(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
- clone1, clone2 = self._generate_random_clone_name(2)
- mode = "777"
- uid = "1000"
- gid = "1000"
- # emulate a v1 subvolume -- in the default group
- subvolume_path = self._create_v1_subvolume(subvolume)
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # getpath
- subvolpath = self._get_subvolume_path(self.volname, subvolume)
- self.assertEqual(subvolpath, subvolume_path)
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # ls
- subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
- self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
- self.assertEqual(subvolumes[0]['name'], subvolume,
- "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # info
- subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
- for md in subvol_md:
- self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self.assertEqual(subvol_info["state"], "complete",
- msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
- self.assertEqual(len(subvol_info["features"]), 2,
- msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
- for feature in ['snapshot-clone', 'snapshot-autoprotect']:
- self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
- # resize
- nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
- self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
- subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
- for md in subvol_md:
- self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
- self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
+ def test_subvolume_snapshot_create_idempotence(self):
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
- # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=8)
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # snap-create
+ # try creating w/ same subvolume snapshot name -- should be idempotent
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # check clone status
- self._wait_for_clone_to_complete(clone1)
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- # ensure clone is v2
- self._assert_meta_location_and_version(self.volname, clone1, version=2)
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
- # verify clone
- self._verify_clone(subvolume, snapshot, clone1, source_version=1)
+ def test_subvolume_snapshot_info(self):
- # clone (older snapshot)
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
+ """
+ tests the 'fs subvolume snapshot info' command
+ """
- # check clone status
- self._wait_for_clone_to_complete(clone2)
+ snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
- # ensure clone is v2
- self._assert_meta_location_and_version(self.volname, clone2, version=2)
+ subvolume = self._generate_random_subvolume_name()
+ snapshot, snap_missing = self._generate_random_snapshot_name(2)
- # verify clone
- # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
- #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=1)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # snap-info
snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
for md in snap_md:
self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
self.assertEqual(snap_info["has_pending_clones"], "no")
- # snap-ls
- subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
- self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots)))
- snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
- for name in [snapshot, 'fake']:
- self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
+ # snapshot info for non-existent snapshot
+ try:
+ self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
+ else:
+ self.fail("expected snapshot info of non-existent snapshot to fail")
- # snap-rm
+ # remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
-
- # ensure volume is still at version 1
- self._assert_meta_location_and_version(self.volname, subvolume, version=1)
- # rm
+ # remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone1)
- self._fs_cmd("subvolume", "rm", self.volname, clone2)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_no_upgrade_v1_to_v2(self):
- """
- poor man's upgrade test -- theme continues...
- ensure v1 to v2 upgrades are not done automatically due to various states of v1
- """
- subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
+ def test_subvolume_snapshot_in_group(self):
+ subvolume = self._generate_random_subvolume_name()
group = self._generate_random_group_name()
+ snapshot = self._generate_random_snapshot_name()
- # emulate a v1 subvolume -- in the default group
- subvol1_path = self._create_v1_subvolume(subvolume1)
-
- # emulate a v1 subvolume -- in a custom group
- subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
-
- # emulate a v1 subvolume -- in a clone pending state
- self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
-
- # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
- subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
- self.assertEqual(subvolpath1, subvol1_path)
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
- subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
- self.assertEqual(subvolpath2, subvol2_path)
+ # create subvolume in group
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
- # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
- # use clone status, as only certain operations are allowed in pending state
- status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
- self.assertEqual(status["status"]["state"], "pending")
+ # snapshot subvolume in group
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
# remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
-
- # ensure metadata file is in v1 location, with version retained as v1
- self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
- self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
- self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
- try:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
- else:
- self.fail("expected rm of subvolume undergoing clone to fail")
-
- # ensure metadata file is in v1 location, with version retained as v1
- self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
- self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
-
- # verify list subvolumes returns an empty list
- subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
- self.assertEqual(len(subvolumels), 0)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_upgrade_v1_to_v2(self):
- """
- poor man's upgrade test -- theme continues...
- ensure v1 to v2 upgrades work
- """
- subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
- group = self._generate_random_group_name()
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
- # emulate a v1 subvolume -- in the default group
- subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
+ def test_subvolume_snapshot_ls(self):
+ # tests the 'fs subvolume snapshot ls' command
- # emulate a v1 subvolume -- in a custom group
- subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
+ snapshots = []
- # this would attempt auto-upgrade on access
- subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
- self.assertEqual(subvolpath1, subvol1_path)
+ # create subvolume
+ subvolume = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
- subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
- self.assertEqual(subvolpath2, subvol2_path)
+ # create subvolume snapshots
+ snapshots = self._generate_random_snapshot_name(3)
+ for snapshot in snapshots:
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # ensure metadata file is in v2 location, with version retained as v2
- self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
- self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
+ subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
+ if len(subvolsnapshotls) == 0:
+ self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
+ else:
+ snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
+ if collections.Counter(snapshotnames) != collections.Counter(snapshots):
+ self.fail("Error creating or listing subvolume snapshots")
+
+ # remove snapshot
+ for snapshot in snapshots:
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
- self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_rm_with_snapshots(self):
+ def test_subvolume_retain_snapshot_invalid_recreate(self):
+ """
+ ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
+ """
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
+ # remove with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # recreate subvolume with an invalid pool
+ data_pool = "invalid_pool"
+ try:
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
+ else:
+ self.fail("expected recreate of subvolume with invalid poolname to fail")
+
+ # fetch info
+ subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
+ self.assertEqual(subvol_info["state"], "snapshot-retained",
+ msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
+
+ # getpath
try:
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOTEMPTY:
- raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
+ self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
else:
- raise RuntimeError("expected subvolume deletion to fail")
+ self.fail("expected getpath of subvolume with retained snapshots to fail")
- # remove snapshot
+ # remove snapshot (should remove volume)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
-
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_without_snapshots(self):
+ def test_subvolume_retain_snapshot_recreate_subvolume(self):
"""
- ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
+ ensure a retained subvolume can be recreated and further snapshotted
"""
+ snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
+
subvolume = self._generate_random_subvolume_name()
+ snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # remove with snapshot retention (should remove volume, no snapshots to retain)
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
+
+ # remove with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # fetch info
+ subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
+ self.assertEqual(subvol_info["state"], "snapshot-retained",
+ msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
+
+ # recreate retained subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # fetch info
+ subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
+ self.assertEqual(subvol_info["state"], "complete",
+ msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
+
+ # snapshot info (older snapshot)
+ snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
+ for md in snap_md:
+ self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
+ self.assertEqual(snap_info["has_pending_clones"], "no")
+
+ # snap-create (new snapshot)
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
+
+ # remove with retain snapshots
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+ # list snapshots
+ subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
+ self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
+ " created subvolume snapshots")
+ snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
+ for snap in [snapshot1, snapshot2]:
+ self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
+
+ # remove snapshots (should remove volume)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
+
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_invalid_recreate(self):
+ def test_subvolume_retain_snapshot_without_snapshots(self):
"""
- ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
+ ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subvolume
"""
subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
-
- # remove with snapshot retention
+ # remove with snapshot retention (should remove volume, no snapshots to retain)
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
- # recreate subvolume with an invalid pool
- data_pool = "invalid_pool"
- try:
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
- else:
- self.fail("expected recreate of subvolume with invalid poolname to fail")
-
- # fetch info
- subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
- self.assertEqual(subvol_info["state"], "snapshot-retained",
- msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
-
- # getpath
- try:
- self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
- else:
- self.fail("expected getpath of subvolume with retained snapshots to fail")
-
- # remove snapshot (should remove volume)
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # verify list subvolumes returns an empty list
+ subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
+ def test_subvolume_rm_with_snapshots(self):
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
+ try:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.ENOTEMPTY:
+ raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
+ else:
+ raise RuntimeError("expected subvolume deletion to fail")
+
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_snapshot_protect_unprotect_sanity(self):
"""
- ensure retained clone recreate fails if its trash is not yet purged
+ Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
+ invoking the command does not cause errors, till they are removed from a subsequent release.
"""
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=64)
+
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # clone subvolume snapshot
+ # now, protect snapshot
+ self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
+
+ # schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
- # snapshot clone
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
+ # now, unprotect snapshot
+ self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
- # remove clone with snapshot retention
- self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone)
- # fake a trash entry
- self._update_fake_trash(clone)
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # clone subvolume snapshot (recreate)
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_snapshot_rm_force(self):
+ # test removing non existing subvolume snapshot with --force
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+
+ # remove snapshot
try:
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
+ except CommandFailedError:
+ raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
+
+
+class TestSubvolumeSnapshotClones(TestVolumesHelper):
+ """ Tests for FS subvolume snapshot clone operations."""
+ def test_clone_subvolume_info(self):
+ # tests the 'fs subvolume info' command for a clone
+ subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
+ "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
+ "type", "uid"]
+
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=1)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
+ if len(subvol_info) == 0:
+ raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
+ for md in subvol_md:
+ if md not in subvol_info.keys():
+ raise RuntimeError("%s not present in the metadata of subvolume" % md)
+ if subvol_info["type"] != "clone":
+ raise RuntimeError("type should be set to clone")
+
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_non_clone_status(self):
+ subvolume = self._generate_random_subvolume_name()
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ try:
+ self._fs_cmd("clone", "status", self.volname, subvolume)
except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
+ if ce.exitstatus != errno.ENOTSUP:
+ raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
else:
- self.fail("expected recreate of clone with purge pending to fail")
+ raise RuntimeError("expected fetching of clone status of a subvolume to fail")
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
+ osize = self.DEFAULT_FILE_SIZE*1024*1024*12
+
+ # create subvolume, in an isolated namespace with a specified size
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize))
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=8)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # create a pool different from current subvolume pool
+ subvol_path = self._get_subvolume_path(self.volname, subvolume)
+ default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
+ new_pool = "new_pool"
+ self.assertNotEqual(default_pool, new_pool)
+ self.fs.add_data_pool(new_pool)
- # clear fake trash entry
- self._update_fake_trash(clone, create=False)
+ # update source subvolume pool
+ self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
- # recreate subvolume
+ # schedule a clone, with NO --pool specification
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone)
+
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
- # remove subvolume
+ # remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_recreate_subvolume(self):
- """
- ensure a retained subvolume can be recreated and further snapshotted
- """
- snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
-
+ def test_subvolume_clone_in_progress_getpath(self):
subvolume = self._generate_random_subvolume_name()
- snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
+ snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
-
- # remove with snapshot retention
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
-
- # fetch info
- subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
- self.assertEqual(subvol_info["state"], "snapshot-retained",
- msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=64)
- # recreate retained subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # fetch info
- subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
- self.assertEqual(subvol_info["state"], "complete",
- msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
- # snapshot info (older snapshot)
- snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
- for md in snap_md:
- self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
- self.assertEqual(snap_info["has_pending_clones"], "no")
+ # clone should not be accessible right now
+ try:
+ self._get_subvolume_path(self.volname, clone)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.EAGAIN:
+ raise RuntimeError("invalid error code when fetching path of a pending clone")
+ else:
+ raise RuntimeError("expected fetching path of a pending clone to fail")
- # snap-create (new snapshot)
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
- # remove with retain snapshots
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+ # clone should be accessible now
+ subvolpath = self._get_subvolume_path(self.volname, clone)
+ self.assertNotEqual(subvolpath, None)
- # list snapshots
- subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
- self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
- " created subvolume snapshots")
- snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
- for snap in [snapshot1, snapshot2]:
- self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone)
- # remove snapshots (should remove volume)
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # verify list subvolumes returns an empty list
- subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
- self.assertEqual(len(subvolumels), 0)
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_clone(self):
- """
- clone a snapshot from a snapshot retained subvolume
- """
+ def test_subvolume_clone_in_progress_snapshot_rm(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # store path for clone verification
- subvol_path = self._get_subvolume_path(self.volname, subvolume)
-
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=16)
+ self._do_subvolume_io(subvolume, number_of_files=64)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # remove with snapshot retention
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
-
- # clone retained subvolume snapshot
+ # schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ # snapshot should not be deletable now
+ try:
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
+ else:
+ self.fail("expected removing source snapshot of a clone to fail")
+
# check clone status
self._wait_for_clone_to_complete(clone)
+ # clone should be accessible now
+ subvolpath = self._get_subvolume_path(self.volname, clone)
+ self.assertNotEqual(subvolpath, None)
+
# verify clone
- self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
+ self._verify_clone(subvolume, snapshot, clone)
- # remove snapshots (removes retained volume)
+ # remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # remove subvolume
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone)
- # verify list subvolumes returns an empty list
- subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
- self.assertEqual(len(subvolumels), 0)
-
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_retain_snapshot_recreate(self):
- """
- recreate a subvolume from one of its retained snapshots
- """
+ def test_subvolume_clone_in_progress_source(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # store path for clone verification
- subvol_path = self._get_subvolume_path(self.volname, subvolume)
-
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=16)
+ self._do_subvolume_io(subvolume, number_of_files=64)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # remove with snapshot retention
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
- # recreate retained subvolume using its own snapshot to clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
+ # verify clone source
+ result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
+ source = result['status']['source']
+ self.assertEqual(source['volume'], self.volname)
+ self.assertEqual(source['subvolume'], subvolume)
+ self.assertEqual(source.get('group', None), None)
+ self.assertEqual(source['snapshot'], snapshot)
# check clone status
- self._wait_for_clone_to_complete(subvolume)
+ self._wait_for_clone_to_complete(clone)
+
+ # clone should be accessible now
+ subvolpath = self._get_subvolume_path(self.volname, clone)
+ self.assertNotEqual(subvolpath, None)
# verify clone
- self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
+ self._verify_clone(subvolume, snapshot, clone)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # remove subvolume
+ # remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
-
- # verify list subvolumes returns an empty list
- subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
- self.assertEqual(len(subvolumels), 0)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
# verify trash dir is clean
self._wait_for_trash_empty()
+ def test_subvolume_retain_snapshot_clone(self):
+ """
+ clone a snapshot from a snapshot retained subvolume
+ """
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # store path for clone verification
+ subvol_path = self._get_subvolume_path(self.volname, subvolume)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=16)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # remove with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # clone retained subvolume snapshot
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
+
+ # remove snapshots (removes retained volume)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify list subvolumes returns an empty list
+ subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumels), 0)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
"""
clone a subvolume from recreated subvolume's latest snapshot
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_protect_unprotect_sanity(self):
- """
- Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
- invoking the command does not cause errors, till they are removed from a subsequent release.
- """
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
-
- # now, protect snapshot
- self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
-
- # check clone status
- self._wait_for_clone_to_complete(clone)
-
- # now, unprotect snapshot
- self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
-
- # verify clone
- self._verify_clone(subvolume, snapshot, clone)
-
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
-
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_snapshot_clone(self):
+ def test_subvolume_retain_snapshot_recreate(self):
+ """
+ recreate a subvolume from one of its retained snapshots
+ """
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ # store path for clone verification
+ subvol_path = self._get_subvolume_path(self.volname, subvolume)
+
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
+ self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ # remove with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # recreate retained subvolume using its own snapshot to clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
# check clone status
- self._wait_for_clone_to_complete(clone)
+ self._wait_for_clone_to_complete(subvolume)
# verify clone
- self._verify_clone(subvolume, snapshot, clone)
+ self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # remove subvolumes
+ # remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify list subvolumes returns an empty list
+ subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
+ def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
"""
- Validate 'max_concurrent_clones' config option
+ ensure retained clone recreate fails if its trash is not yet purged
"""
-
- # get the default number of cloner threads
- default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
- self.assertEqual(default_max_concurrent_clones, 4)
-
- # Increase number of cloner threads
- self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
- max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
- self.assertEqual(max_concurrent_clones, 6)
-
- # Decrease number of cloner threads
- self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
- max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
- self.assertEqual(max_concurrent_clones, 2)
-
- def test_subvolume_snapshot_clone_pool_layout(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
- # add data pool
- new_pool = "new_pool"
- self.fs.add_data_pool(new_pool)
-
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=32)
-
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)
+ # clone subvolume snapshot
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
- # verify clone
- self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)
+ # snapshot clone
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
+
+ # remove clone with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
+
+ # fake a trash entry
+ self._update_fake_trash(clone)
+
+ # clone subvolume snapshot (recreate)
+ try:
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
+ else:
+ self.fail("expected recreate of clone with purge pending to fail")
+
+ # clear fake trash entry
+ self._update_fake_trash(clone, create=False)
+
+ # recreate subvolume
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
- subvol_path = self._get_subvolume_path(self.volname, clone)
- desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
- self.assertEqual(desired_pool, new_pool)
-
- # remove subvolumes
+ # remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_clone_with_attrs(self):
+ def test_subvolume_snapshot_attr_clone(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
- mode = "777"
- uid = "1000"
- gid = "1000"
- new_uid = "1001"
- new_gid = "1001"
- new_mode = "700"
-
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=32)
+ self._do_subvolume_io_mixed(subvolume)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # change subvolume attrs (to ensure clone picks up snapshot attrs)
- self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)
-
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
+ def test_subvolume_snapshot_clone(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
- osize = self.DEFAULT_FILE_SIZE*1024*1024*12
- # create subvolume, in an isolated namespace with a specified size
- self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize))
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=8)
+ self._do_subvolume_io(subvolume, number_of_files=64)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # create a pool different from current subvolume pool
- subvol_path = self._get_subvolume_path(self.volname, subvolume)
- default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
- new_pool = "new_pool"
- self.assertNotEqual(default_pool, new_pool)
- self.fs.add_data_pool(new_pool)
-
- # update source subvolume pool
- self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
-
- # schedule a clone, with NO --pool specification
+ # schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# check clone status
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone1)
- self._fs_cmd("subvolume", "rm", self.volname, clone2)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_snapshot_clone_under_group(self):
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
- group = self._generate_random_group_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
- # do some IO
- self._do_subvolume_io(subvolume, number_of_files=32)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)
-
- # check clone status
- self._wait_for_clone_to_complete(clone, clone_group=group)
-
- # verify clone
- self._verify_clone(subvolume, snapshot, clone, clone_group=group)
-
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
-
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone, group)
-
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_under_group_snapshot_clone(self):
- subvolume = self._generate_random_subvolume_name()
- group = self._generate_random_group_name()
- snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
-
- # create group
- self._fs_cmd("subvolumegroup", "create", self.volname, group)
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume, group)
-
- # do some IO
- self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)
-
- # check clone status
- self._wait_for_clone_to_complete(clone)
-
- # verify clone
- self._verify_clone(subvolume, snapshot, clone, source_group=group)
-
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
-
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
-
- # remove group
- self._fs_cmd("subvolumegroup", "rm", self.volname, group)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_subvolume_snapshot_clone_different_groups(self):
- subvolume = self._generate_random_subvolume_name()
- snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
- s_group, c_group = self._generate_random_group_name(2)
-
- # create groups
- self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
- self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group)
-
- # do some IO
- self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
-
- # snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
- '--group_name', s_group, '--target_group_name', c_group)
-
- # check clone status
- self._wait_for_clone_to_complete(clone, clone_group=c_group)
-
- # verify clone
- self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
-
- # remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
-
- # remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
- self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
-
- # remove groups
- self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
- self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone1)
+ self._fs_cmd("subvolume", "rm", self.volname, clone2)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_clone_with_upgrade(self):
- """
- yet another poor man's upgrade test -- rather than going through a full
- upgrade cycle, emulate old types subvolumes by going through the wormhole
- and verify clone operation.
- further ensure that a legacy volume is not updated to v2, but clone is.
- """
+ def test_subvolume_snapshot_clone_cancel_in_progress(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
- # emulate a old-fashioned subvolume
- createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
- self.mount_a.run_shell(['mkdir', '-p', createpath])
-
- # add required xattrs to subvolume
- default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
- self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
+ self._do_subvolume_io(subvolume, number_of_files=128)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # ensure metadata file is in legacy location, with required version v1
- self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
-
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
- # snapshot should not be deletable now
- try:
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
- else:
- self.fail("expected removing source snapshot of a clone to fail")
-
- # check clone status
- self._wait_for_clone_to_complete(clone)
+ # cancel on-going clone
+ self._fs_cmd("clone", "cancel", self.volname, clone)
- # verify clone
- self._verify_clone(subvolume, snapshot, clone, source_version=1)
+ # verify canceled state
+ self._check_clone_canceled(clone)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- # ensure metadata file is in v2 location, with required version v2
- self._assert_meta_location_and_version(self.volname, clone)
-
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
+ self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_clone_in_progress_getpath(self):
+ def test_subvolume_snapshot_clone_cancel_pending(self):
+ """
+ this test is a bit more involved compared to canceling an in-progress clone.
+ we'd need to ensure that a to-be canceled clone has still not been picked up
+ by cloner threads. exploit the fact that clones are picked up in an FCFS
+ fashion and there are four (4) cloner threads by default. When the number of
+ cloner threads increase, this test _may_ start tripping -- so, the number of
+ clone operations would need to be jacked up.
+ """
+ # default number of clone threads
+ NR_THREADS = 4
+ # good enough for 4 threads
+ NR_CLONES = 5
+ # yeh, 1gig -- we need the clone to run for sometime
+ FILE_SIZE_MB = 1024
+
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
+ clones = self._generate_random_clone_name(NR_CLONES)
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
+ self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
-
- # clone should not be accessible right now
- try:
- self._get_subvolume_path(self.volname, clone)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.EAGAIN:
- raise RuntimeError("invalid error code when fetching path of an pending clone")
- else:
- raise RuntimeError("expected fetching path of an pending clone to fail")
+ # schedule clones
+ for clone in clones:
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
- # check clone status
- self._wait_for_clone_to_complete(clone)
+ to_wait = clones[0:NR_THREADS]
+ to_cancel = clones[NR_THREADS:]
- # clone should be accessible now
- subvolpath = self._get_subvolume_path(self.volname, clone)
- self.assertNotEqual(subvolpath, None)
+ # cancel pending clones and verify
+ for clone in to_cancel:
+ status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
+ self.assertEqual(status["status"]["state"], "pending")
+ self._fs_cmd("clone", "cancel", self.volname, clone)
+ self._check_clone_canceled(clone)
- # verify clone
- self._verify_clone(subvolume, snapshot, clone)
+ # let's cancel on-going clones. handle the case where some of the clones
+ # _just_ complete
+ for clone in list(to_wait):
+ try:
+ self._fs_cmd("clone", "cancel", self.volname, clone)
+ to_cancel.append(clone)
+ to_wait.remove(clone)
+ except CommandFailedError as ce:
+ if ce.exitstatus != errno.EINVAL:
+ raise RuntimeError("invalid error code when cancelling on-going clone")
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
+ for clone in to_wait:
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+ for clone in to_cancel:
+ self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_clone_in_progress_snapshot_rm(self):
+ def test_subvolume_snapshot_clone_different_groups(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
+ s_group, c_group = self._generate_random_group_name(2)
+
+ # create groups
+ self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
+ self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
+ self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
# snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
# schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
-
- # snapshot should not be deletable now
- try:
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
- except CommandFailedError as ce:
- self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
- else:
- self.fail("expected removing source snapshot of a clone to fail")
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
+ '--group_name', s_group, '--target_group_name', c_group)
# check clone status
- self._wait_for_clone_to_complete(clone)
-
- # clone should be accessible now
- subvolpath = self._get_subvolume_path(self.volname, clone)
- self.assertNotEqual(subvolpath, None)
+ self._wait_for_clone_to_complete(clone, clone_group=c_group)
# verify clone
- self._verify_clone(subvolume, snapshot, clone)
+ self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
# remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
# remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
+ self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
+
+ # remove groups
+ self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_clone_in_progress_source(self):
+ def test_subvolume_snapshot_clone_fail_with_remove(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
- clone = self._generate_random_clone_name()
+ clone1, clone2 = self._generate_random_clone_name(2)
+
+ pool_capacity = 32 * 1024 * 1024
+ # number of files required to fill up 99% of the pool
+ nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=64)
+ self._do_subvolume_io(subvolume, number_of_files=nr_files)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ # add data pool
+ new_pool = "new_pool"
+ self.fs.add_data_pool(new_pool)
- # verify clone source
- result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
- source = result['status']['source']
- self.assertEqual(source['volume'], self.volname)
- self.assertEqual(source['subvolume'], subvolume)
- self.assertEqual(source.get('group', None), None)
- self.assertEqual(source['snapshot'], snapshot)
+ self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
+ "max_bytes", "{0}".format(pool_capacity // 4))
- # check clone status
- self._wait_for_clone_to_complete(clone)
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
- # clone should be accessible now
- subvolpath = self._get_subvolume_path(self.volname, clone)
- self.assertNotEqual(subvolpath, None)
+ # check clone status -- this should dramatically overshoot the pool quota
+ self._wait_for_clone_to_complete(clone1)
# verify clone
- self._verify_clone(subvolume, snapshot, clone)
+ self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
+
+ # wait a bit so that subsequent I/O will give pool full error
+ time.sleep(120)
+
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
+
+ # check clone status
+ self._wait_for_clone_to_fail(clone2)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone)
-
- # verify trash dir is clean
- self._wait_for_trash_empty()
-
- def test_non_clone_status(self):
- subvolume = self._generate_random_subvolume_name()
-
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
-
+ self._fs_cmd("subvolume", "rm", self.volname, clone1)
try:
- self._fs_cmd("clone", "status", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone2)
except CommandFailedError as ce:
- if ce.exitstatus != errno.ENOTSUP:
- raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
+ if ce.exitstatus != errno.EAGAIN:
+ raise RuntimeError("invalid error code when trying to remove failed clone")
else:
- raise RuntimeError("expected fetching of clone status of a subvolume to fail")
+ raise RuntimeError("expected error when removing a failed clone")
- # remove subvolume
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ # ... and with force, failed clone can be removed
+ self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
# verify trash dir is clean
self._wait_for_trash_empty()
self._wait_for_clone_to_complete(clone)
# verify clone
- self._verify_clone(subvolume1, snapshot, clone)
+ self._verify_clone(subvolume1, snapshot, clone)
+
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
+
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_snapshot_clone_pool_layout(self):
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
+
+ # add data pool
+ new_pool = "new_pool"
+ self.fs.add_data_pool(new_pool)
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=32)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)
# remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+
+ subvol_path = self._get_subvolume_path(self.volname, clone)
+ desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
+ self.assertEqual(desired_pool, new_pool)
# remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
- self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_clone_fail_with_remove(self):
+ def test_subvolume_snapshot_clone_under_group(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
- clone1, clone2 = self._generate_random_clone_name(2)
-
- pool_capacity = 32 * 1024 * 1024
- # number of files required to fill up 99% of the pool
- nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
+ clone = self._generate_random_clone_name()
+ group = self._generate_random_group_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=nr_files)
+ self._do_subvolume_io(subvolume, number_of_files=32)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
- # add data pool
- new_pool = "new_pool"
- self.fs.add_data_pool(new_pool)
-
- self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
- "max_bytes", "{0}".format(pool_capacity // 4))
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
# schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)
- # check clone status -- this should dramatically overshoot the pool quota
- self._wait_for_clone_to_complete(clone1)
+ # check clone status
+ self._wait_for_clone_to_complete(clone, clone_group=group)
# verify clone
- self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
-
- # wait a bit so that subsequent I/O will give pool full error
- time.sleep(120)
-
- # schedule a clone
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
-
- # check clone status
- self._wait_for_clone_to_fail(clone2)
+ self._verify_clone(subvolume, snapshot, clone, clone_group=group)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone1)
- try:
- self._fs_cmd("subvolume", "rm", self.volname, clone2)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.EAGAIN:
- raise RuntimeError("invalid error code when trying to remove failed clone")
- else:
- raise RuntimeError("expected error when removing a failed clone")
+ self._fs_cmd("subvolume", "rm", self.volname, clone, group)
- # ... and with force, failed clone can be removed
- self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_attr_clone(self):
+ def test_subvolume_snapshot_clone_with_attrs(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
+ mode = "777"
+ uid = "1000"
+ gid = "1000"
+ new_uid = "1001"
+ new_gid = "1001"
+ new_mode = "700"
+
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
# do some IO
- self._do_subvolume_io_mixed(subvolume)
+ self._do_subvolume_io(subvolume, number_of_files=32)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # change subvolume attrs (to ensure clone picks up snapshot attrs)
+ self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)
+
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_clone_cancel_in_progress(self):
+ def test_subvolume_snapshot_clone_with_upgrade(self):
+ """
+ yet another poor man's upgrade test -- rather than going through a full
+ upgrade cycle, emulate old-style subvolumes by going through the wormhole
+ and verify clone operation.
+ further ensure that a legacy volume is not updated to v2, but clone is.
+ """
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
- # create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ # emulate an old-fashioned subvolume
+ createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
+ self.mount_a.run_shell(['mkdir', '-p', createpath])
+
+ # add required xattrs to subvolume
+ default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
+ self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=128)
+ self._do_subvolume_io(subvolume, number_of_files=64)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+ # ensure metadata file is in legacy location, with required version v1
+ self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
+
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
- # cancel on-going clone
- self._fs_cmd("clone", "cancel", self.volname, clone)
+ # snapshot should not be deletable now
+ try:
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
+ else:
+ self.fail("expected removing source snapshot of a clone to fail")
- # verify canceled state
- self._check_clone_canceled(clone)
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone, source_version=1)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ # ensure metadata file is in v2 location, with required version v2
+ self._assert_meta_location_and_version(self.volname, clone)
+
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify trash dir is clean
self._wait_for_trash_empty()
- def test_subvolume_snapshot_clone_cancel_pending(self):
+ def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
"""
- this test is a bit more involved compared to canceling an in-progress clone.
- we'd need to ensure that a to-be canceled clone has still not been picked up
- by cloner threads. exploit the fact that clones are picked up in an FCFS
- fashion and there are four (4) cloner threads by default. When the number of
- cloner threads increase, this test _may_ start tripping -- so, the number of
- clone operations would need to be jacked up.
+ Validate 'max_concurrent_clones' config option
"""
- # default number of clone threads
- NR_THREADS = 4
- # good enough for 4 threads
- NR_CLONES = 5
- # yeh, 1gig -- we need the clone to run for sometime
- FILE_SIZE_MB = 1024
+ # get the default number of cloner threads
+ default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
+ self.assertEqual(default_max_concurrent_clones, 4)
+
+ # Increase number of cloner threads
+ self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
+ max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
+ self.assertEqual(max_concurrent_clones, 6)
+
+ # Decrease number of cloner threads
+ self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
+ max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
+ self.assertEqual(max_concurrent_clones, 2)
+
+ def test_subvolume_under_group_snapshot_clone(self):
subvolume = self._generate_random_subvolume_name()
+ group = self._generate_random_group_name()
snapshot = self._generate_random_snapshot_name()
- clones = self._generate_random_clone_name(NR_CLONES)
+ clone = self._generate_random_clone_name()
+
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
# create subvolume
- self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, group)
# do some IO
- self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
+ self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
# snapshot subvolume
- self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
-
- # schedule clones
- for clone in clones:
- self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
- to_wait = clones[0:NR_THREADS]
- to_cancel = clones[NR_THREADS:]
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)
- # cancel pending clones and verify
- for clone in to_cancel:
- status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
- self.assertEqual(status["status"]["state"], "pending")
- self._fs_cmd("clone", "cancel", self.volname, clone)
- self._check_clone_canceled(clone)
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
- # let's cancel on-going clones. handle the case where some of the clones
- # _just_ complete
- for clone in list(to_wait):
- try:
- self._fs_cmd("clone", "cancel", self.volname, clone)
- to_cancel.append(clone)
- to_wait.remove(clone)
- except CommandFailedError as ce:
- if ce.exitstatus != errno.EINVAL:
- raise RuntimeError("invalid error code when cancelling on-going clone")
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone, source_group=group)
# remove snapshot
- self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
# remove subvolumes
- self._fs_cmd("subvolume", "rm", self.volname, subvolume)
- for clone in to_wait:
- self._fs_cmd("subvolume", "rm", self.volname, clone)
- for clone in to_cancel:
- self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# verify trash dir is clean
self._wait_for_trash_empty()
+
+class TestMisc(TestVolumesHelper):
+ """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
+ def test_connection_expiration(self):
+ # unmount any cephfs mounts
+ self.mount_a.umount_wait()
+ sessions = self._session_list()
+ self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
+
+ # Get the mgr to definitely mount cephfs
+ subvolume = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ sessions = self._session_list()
+ self.assertEqual(len(sessions), 1)
+
+ # Now wait for the mgr to expire the connection:
+ self.wait_until_evicted(sessions[0]['id'], timeout=90)
+
+ def test_mgr_eviction(self):
+ # unmount any cephfs mounts
+ self.mount_a.umount_wait()
+ sessions = self._session_list()
+ self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
+
+ # Get the mgr to definitely mount cephfs
+ subvolume = self._generate_random_subvolume_name()
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+ sessions = self._session_list()
+ self.assertEqual(len(sessions), 1)
+
+ # Now fail the mgr, check the session was evicted
+ mgr = self.mgr_cluster.get_active_id()
+ self.mgr_cluster.mgr_fail(mgr)
+ self.wait_until_evicted(sessions[0]['id'])
+
+ def test_names_can_only_be_goodchars(self):
+ """
+ Test that creating vols, subvols and subvolgroups fails when their names
+ use characters beyond [a-zA-Z0-9 -_.].
+ """
+ volname, badname = 'testvol', 'abcd@#'
+
+ with self.assertRaises(CommandFailedError):
+ self._fs_cmd('volume', 'create', badname)
+ self._fs_cmd('volume', 'create', volname)
+
+ with self.assertRaises(CommandFailedError):
+ self._fs_cmd('subvolumegroup', 'create', volname, badname)
+
+ with self.assertRaises(CommandFailedError):
+ self._fs_cmd('subvolume', 'create', volname, badname)
+ self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
+
def test_subvolume_ops_on_nonexistent_vol(self):
# tests the fs subvolume operations on non existing volume
else:
self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
- def test_names_can_only_be_goodchars(self):
+ def test_subvolume_upgrade_legacy_to_v1(self):
"""
- Test the creating vols, subvols subvolgroups fails when their names uses
- characters beyond [a-zA-Z0-9 -_.].
+ poor man's upgrade test -- rather than going through a full upgrade cycle,
+ emulate subvolumes by going through the wormhole and verify if they are
+ accessible.
+ further ensure that a legacy volume is not updated to v2.
"""
- volname, badname = 'testvol', 'abcd@#'
+ subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
+ group = self._generate_random_group_name()
- with self.assertRaises(CommandFailedError):
- self._fs_cmd('volume', 'create', badname)
- self._fs_cmd('volume', 'create', volname)
+ # emulate a old-fashioned subvolume -- one in the default group and
+ # the other in a custom group
+ createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
+ self.mount_a.run_shell(['mkdir', '-p', createpath1])
- with self.assertRaises(CommandFailedError):
- self._fs_cmd('subvolumegroup', 'create', volname, badname)
+ # create group
+ createpath2 = os.path.join(".", "volumes", group, subvolume2)
+ self.mount_a.run_shell(['mkdir', '-p', createpath2])
- with self.assertRaises(CommandFailedError):
- self._fs_cmd('subvolume', 'create', volname, badname)
- self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
+ # this would auto-upgrade on access without anyone noticing
+ subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
+ self.assertNotEqual(subvolpath1, None)
+ subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline
+
+ subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
+ self.assertNotEqual(subvolpath2, None)
+ subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline
+
+ # and... the subvolume path returned should be what we created behind the scene
+ self.assertEqual(createpath1[1:], subvolpath1)
+ self.assertEqual(createpath2[1:], subvolpath2)
+
+ # ensure metadata file is in legacy location, with required version v1
+ self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
+ self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_subvolume_no_upgrade_v1_sanity(self):
+ """
+ poor man's upgrade test -- theme continues...
+
+ This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
+ a series of operations on the v1 subvolume to ensure they work as expected.
+ """
+ subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
+ "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
+ "type", "uid", "features", "state"]
+ snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
+
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone1, clone2 = self._generate_random_clone_name(2)
+ mode = "777"
+ uid = "1000"
+ gid = "1000"
+
+ # emulate a v1 subvolume -- in the default group
+ subvolume_path = self._create_v1_subvolume(subvolume)
+
+ # getpath
+ subvolpath = self._get_subvolume_path(self.volname, subvolume)
+ self.assertEqual(subvolpath, subvolume_path)
+
+ # ls
+ subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
+ self.assertEqual(subvolumes[0]['name'], subvolume,
+ "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
+
+ # info
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+
+ self.assertEqual(subvol_info["state"], "complete",
+ msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
+ self.assertEqual(len(subvol_info["features"]), 2,
+ msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
+ for feature in ['snapshot-clone', 'snapshot-autoprotect']:
+ self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
+
+ # resize
+ nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
+ self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+ self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
+
+ # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=8)
+
+ # snap-create
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
+
+ # clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone1)
+
+ # ensure clone is v2
+ self._assert_meta_location_and_version(self.volname, clone1, version=2)
+
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone1, source_version=1)
+
+ # clone (older snapshot)
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone2)
+
+ # ensure clone is v2
+ self._assert_meta_location_and_version(self.volname, clone2, version=2)
+
+ # verify clone
+ # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
+ #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
+
+ # snap-info
+ snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
+ for md in snap_md:
+ self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
+ self.assertEqual(snap_info["has_pending_clones"], "no")
+
+ # snap-ls
+ subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
+ self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots)))
+ snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
+ for name in [snapshot, 'fake']:
+ self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
+
+ # snap-rm
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
+
+ # ensure volume is still at version 1
+ self._assert_meta_location_and_version(self.volname, subvolume, version=1)
+
+ # rm
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone1)
+ self._fs_cmd("subvolume", "rm", self.volname, clone2)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_no_upgrade_v1_to_v2(self):
+ """
+ poor man's upgrade test -- theme continues...
+ ensure v1 to v2 upgrades are not done automatically due to various states of v1
+ """
+ subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
+ group = self._generate_random_group_name()
+
+ # emulate a v1 subvolume -- in the default group
+ subvol1_path = self._create_v1_subvolume(subvolume1)
+
+ # emulate a v1 subvolume -- in a custom group
+ subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
+
+ # emulate a v1 subvolume -- in a clone pending state
+ self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
+
+ # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
+ subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
+ self.assertEqual(subvolpath1, subvol1_path)
+
+ subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
+ self.assertEqual(subvolpath2, subvol2_path)
+
+ # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
+ # use clone status, as only certain operations are allowed in pending state
+ status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
+ self.assertEqual(status["status"]["state"], "pending")
+
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
+
+ # ensure metadata file is in v1 location, with version retained as v1
+ self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
+ self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+ try:
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
+ else:
+ self.fail("expected rm of subvolume undergoing clone to fail")
+
+ # ensure metadata file is in v1 location, with version retained as v1
+ self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
+
+ # verify list subvolumes returns an empty list
+ subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumels), 0)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_upgrade_v1_to_v2(self):
+ """
+ poor man's upgrade test -- theme continues...
+ ensure v1 to v2 upgrades work
+ """
+ subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
+ group = self._generate_random_group_name()
+
+ # emulate a v1 subvolume -- in the default group
+ subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
+
+ # emulate a v1 subvolume -- in a custom group
+ subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
+
+ # this would attempt auto-upgrade on access
+ subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
+ self.assertEqual(subvolpath1, subvol1_path)
+
+ subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
+ self.assertEqual(subvolpath2, subvol2_path)
+
+ # ensure metadata file is in v2 location, with version retained as v2
+ self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
+ self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()