client_id = 'test_new_cephfs'
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+ self.run_ceph_cmd(f'fs set {new_fs_name} refuse_client_session false')
sleep(5)
# authorize a cephx ID access to the renamed file system.
new_fs_name = 'new_cephfs'
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+ self.run_ceph_cmd(f'fs set {new_fs_name} refuse_client_session false')
sleep(5)
# original file system name does not appear in `fs ls` command
data_pool = self.fs.get_data_pool_name()
metadata_pool = self.fs.get_metadata_pool_name()
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+ self.run_ceph_cmd(f'fs set {new_fs_name} refuse_client_session false')
sleep(5)
try:
That renaming a file system without '--yes-i-really-mean-it' flag fails.
"""
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
try:
self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
self.fail("expected renaming of file system without the "
"'--yes-i-really-mean-it' flag to fail ")
self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session false')
def test_fs_rename_fails_for_non_existent_fs(self):
"""
That renaming a non-existent file system fails.
"""
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
try:
self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
try:
self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
else:
self.fail("expected renaming to a new file system name that is already in use to fail.")
self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session false')
def test_fs_rename_fails_with_mirroring_enabled(self):
"""
self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
sleep(5)
try:
self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
self.fail("expected renaming of a mirrored file system to fail")
self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session false')
def test_rename_when_fs_is_online(self):
'''
'''
client_id = 'test_new_cephfs'
new_fs_name = 'new_cephfs'
+
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session true')
self.negtest_ceph_cmd(
args=(f'fs rename {self.fs.name} {new_fs_name} '
'--yes-i-really-mean-it'),
"renaming a CephFS, it must be marked as down. See "
"`ceph fs fail`."),
retval=errno.EPERM)
+ self.run_ceph_cmd(f'fs set {self.fs.name} refuse_client_session false')
self.fs.getinfo()
keyring = self.fs.authorize(client_id, ('/', 'rw'))
self.fs.get_metadata_pool_name(), 'cephfs', 'metadata',
self.fs.name)
+ def test_rename_when_clients_not_refused(self):
+ '''
+ Test that "ceph fs rename" fails when client_refuse_session is not
+ set.
+ '''
+ self.mount_a.umount_wait(require_clean=True)
+
+ self.run_ceph_cmd(f'fs fail {self.fs.name}')
+ self.negtest_ceph_cmd(
+ args=f"fs rename {self.fs.name} new_fs --yes-i-really-mean-it",
+ errmsgs=(f"CephFS '{self.fs.name}' doesn't refuse clients. "
+ "Before renaming a CephFS, flag "
+ "'refuse_client_session' must be set. See "
+ "`ceph fs set`."),
+ retval=errno.EPERM)
+ self.run_ceph_cmd(f'fs fail {self.fs.name}')
+
class TestDump(CephFSTestCase):
CLIENTS_REQUIRED = 0
new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
self.run_ceph_cmd(f'fs fail {oldvolname}')
+ self.run_ceph_cmd(f'fs set {oldvolname} refuse_client_session true')
self._fs_cmd("volume", "rename", oldvolname, newvolname,
"--yes-i-really-mean-it")
self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+ self.run_ceph_cmd(f'fs set {newvolname} refuse_client_session false')
volumels = json.loads(self._fs_cmd('volume', 'ls'))
volnames = [volume['name'] for volume in volumels]
new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
self.run_ceph_cmd(f'fs fail {oldvolname}')
+ self.run_ceph_cmd(f'fs set {oldvolname} refuse_client_session true')
self._fs_cmd("volume", "rename", oldvolname, newvolname,
"--yes-i-really-mean-it")
self._fs_cmd("volume", "rename", oldvolname, newvolname,
"--yes-i-really-mean-it")
self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+ self.run_ceph_cmd(f'fs set {newvolname} refuse_client_session false')
volumels = json.loads(self._fs_cmd('volume', 'ls'))
volnames = [volume['name'] for volume in volumels]
newvolname = self._generate_random_volume_name()
self.run_ceph_cmd(f'fs fail {self.volname}')
+ self.run_ceph_cmd(f'fs set {self.volname} refuse_client_session true')
try:
self._fs_cmd("volume", "rename", self.volname, newvolname)
except CommandFailedError as ce:
self.fail("expected renaming of FS volume to fail without the "
"'--yes-i-really-mean-it' flag")
self.run_ceph_cmd(f'fs set {self.volname} joinable true')
+ self.run_ceph_cmd(f'fs set {self.volname} refuse_client_session false')
def test_volume_rename_for_more_than_one_data_pool(self):
"""
new_metadata_pool = f"cephfs.{newvolname}.meta"
self.run_ceph_cmd(f'fs fail {oldvolname}')
- self._fs_cmd("volume", "rename", self.volname, newvolname,
+ self.run_ceph_cmd(f'fs set {oldvolname} refuse_client_session true')
+ self._fs_cmd("volume", "rename", oldvolname, newvolname,
"--yes-i-really-mean-it")
self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+ self.run_ceph_cmd(f'fs set {newvolname} refuse_client_session false')
volumels = json.loads(self._fs_cmd('volume', 'ls'))
volnames = [volume['name'] for volume in volumels]
m.umount_wait()
newvolname = self._generate_random_volume_name()
+ self.run_ceph_cmd(f'fs set {self.volname} refuse_client_session true')
self.negtest_ceph_cmd(
args=(f'fs volume rename {self.volname} {newvolname} '
'--yes-i-really-mean-it'),
"renaming a CephFS, it must be marked as down. See "
"`ceph fs fail`."),
retval=errno.EPERM)
+ self.run_ceph_cmd(f'fs set {self.volname} refuse_client_session false')
+
+ def test_rename_when_clients_arent_refused(self):
+ '''
+ Test that "ceph fs volume rename" fails when the
+ 'refuse_client_session' flag is not set on the volume.
+ '''
+ newvolname = self._generate_random_volume_name()
+ # Unmount all clients first so the rejection asserted below comes
+ # from the missing flag, not from live client sessions.
+ for m in self.mounts:
+ m.umount_wait()
+
+ # The volume is failed (a rename prerequisite), but
+ # refuse_client_session is deliberately NOT set, so the rename must
+ # be refused with EPERM and the matching error message.
+ self.run_ceph_cmd(f'fs fail {self.volname}')
+ self.negtest_ceph_cmd(
+ args=(f'fs volume rename {self.volname} {newvolname} '
+ '--yes-i-really-mean-it'),
+ errmsgs=(f"CephFS '{self.volname}' doesn't refuse clients. "
+ "Before renaming a CephFS, flag "
+ "'refuse_client_session' must be set. See "
+ "`ceph fs set`."),
+ retval=errno.EPERM)
+ # NOTE(review): the volume is left failed here (no `joinable true`
+ # restore) — presumably the test harness recreates the volume in
+ # teardown; confirm against the base test class before merging.
class TestSubvolumeGroups(TestVolumesHelper):
"""Tests for FS subvolume group operations."""
return -EPERM;
}
+ // Check that refuse_client_session is set.
+ if (!fsp->get_mds_map().test_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION)) {
+ ss << "CephFS '" << fs_name << "' doesn't refuse clients. Before "
+ << "renaming a CephFS, flag 'refuse_client_session' must be set. "
+ << "See `ceph fs set`.";
+ return -EPERM;
+ }
+
for (const auto p : fsp->get_mds_map().get_data_pools()) {
mon->osdmon()->do_application_enable(p,
pg_pool_t::APPLICATION_NAME_CEPHFS,