mntobj.hostfs_mntpt = self.hostfs_mntpt
+class RunCephCmd:
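+    """
+    Mixin that runs Ceph cluster commands through self.mon_manager.
+
+    get_ceph_cmd_result() accepts the command either ready-made (a str or
+    list passed as the single positional argument, or via args=...) or as
+    separate positional pieces, and returns only the command's exit status.
+    """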
+
+    def get_ceph_cmd_result(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        # Default to check_status=False so that a nonzero exit status is
+        # returned to the caller instead of raising CommandFailedError,
+        # preserving the semantics of the raw_cluster_cmd_result() calls
+        # this helper replaces (e.g. the EINVAL assertion below).
+        if kwargs.get('check_status') is None:
+            kwargs['check_status'] = False
+        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+
+
class CephFSTestCase(CephTestCase, RunCephCmd):
"""
Test case for Ceph FS, requires caller to populate Filesystem and Mounts,
'osd', f'allow rw tag cephfs data={self.fs.name}',
'mds', 'allow']
- if self.run_cluster_cmd_result(cmd) == 0:
+ if self.get_ceph_cmd_result(*cmd) == 0:
break
cmd[1] = 'add'
- if self.run_cluster_cmd_result(cmd) != 0:
+ if self.get_ceph_cmd_result(*cmd) != 0:
raise RuntimeError(f'Failed to create new client {cmd[2]}')
# wait for ranks to become active
cmd = shlex_split(cmd)
return self.fs.mon_manager.raw_cluster_cmd(*cmd)
- def run_cluster_cmd_result(self, cmd):
- if isinstance(cmd, str):
- cmd = shlex_split(cmd)
- return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
-
def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
if not (moncap or osdcap or mdscap):
if self.fs:
# Reconfigure client auth caps
for mount in self.mounts:
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(mount.client_id),
'mds', 'allow',
'mon', 'allow r',
self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)
def push_balancer(self, obj, lua_code, expect):
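+        """Set the fs 'balancer' setting to obj, upload lua_code into RADOS
+        and assert that the expected message appears in the cluster log."""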
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
self.fs.radosm(["put", obj, "-"], stdin=StringIO(lua_code))
with self.assert_cluster_log(failure + obj + " " + expect):
log.info("run a " + obj + " balancer that expects=" + expect)
self.start_mantle()
expect = " : (2) No such file or directory"
- ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
+ ret = self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer')
assert(ret == 22) # EINVAL
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
with self.assert_cluster_log(failure + " " + expect): pass
def test_version_not_in_rados(self):
self.start_mantle()
expect = failure + "ghost.lua : (2) No such file or directory"
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
with self.assert_cluster_log(expect): pass
def test_balancer_invalid(self):
def test_balancer_valid(self):
self.start_mantle()
lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
self.fs.radosm(["put", "valid.lua", "-"], stdin=StringIO(lua_code))
with self.assert_cluster_log(success + "valid.lua"):
log.info("run a valid.lua balancer")
# kill the OSDs so that the balancer pull from RADOS times out
osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
for i in range(0, len(osd_map['osds'])):
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))
+ self.get_ceph_cmd_result('osd', 'down', str(i))
+ self.get_ceph_cmd_result('osd', 'out', str(i))
# trigger a pull from RADOS
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
# make the timeout a little longer since dead OSDs spam ceph -w
with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
# cleanup
for i in range(0, len(osd_map['osds'])):
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))
+ self.get_ceph_cmd_result('osd', 'in', str(i))
# Reconfigure client auth caps
for mount in self.mounts:
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', f"client.{mount.client_id}",
'mds', 'allow',
'mon', 'allow r',
def test_cephfs_mirror_stats(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
def test_cephfs_mirror_cancel_sync(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
def test_cephfs_mirror_restart_sync_on_blocklist(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
def test_cephfs_mirror_symlink_sync(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
def test_cephfs_mirror_incremental_sync(self):
""" Test incremental snapshot synchronization (based on mtime differences)."""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
file_z | sym dir reg sym
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
that all replayer threads (3 by default) in the mirror daemon are busy.
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
log.debug('reconfigure client auth caps')
cid = self.mount_b.client_id
data_pool = self.backup_fs.get_data_pool_name()
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', f"client.{cid}",
'mds', 'allow rw',
'mon', 'allow r',
:param cmd_args: nfs command arguments to be run
'''
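+        # Run the operation once, then re-issue the same nfs command; an
+        # idempotent command must still exit 0 on the repeat run.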
cmd_func()
- ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
+ ret = self.get_ceph_cmd_result(*cmd_args)
if ret != 0:
self.fail("Idempotency test failed")
client_name = "client.{0}".format(self.mount_a.client_id)
# set data pool read only
- self.fs.mon_manager.raw_cluster_cmd_result(
- 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
- 'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+ self.get_ceph_cmd_result(
+ 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+ 'osd', 'allow r pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
self.mount_a.mount_wait()
self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
# set data pool write only
- self.fs.mon_manager.raw_cluster_cmd_result(
- 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
- 'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+ self.get_ceph_cmd_result(
+ 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+ 'osd', 'allow w pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
self.mount_a.mount_wait()
self.mount_a.run_shell(["mkdir", "layoutdir"])
# Set MDS 'rw' perms: missing 'p' means no setting pool layouts
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r',
'osd',
'allow rw pool={0},allow rw pool={1}'.format(
self.mount_a.umount_wait()
# Set MDS 'rwp' perms: should now be able to set layouts
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r',
'osd',
'allow rw pool={0},allow rw pool={1}'.format(
self.mount_a.umount_wait()
def tearDown(self):
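+        # Restore the client caps that the tests above overwrote.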
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
'mds', 'allow', 'mon', 'allow r', 'osd',
'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
"""
test_pool_name = 'snap-test-pool'
base_cmd = f'osd pool create {test_pool_name}'
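+        # Each step asserts on the exit status, so check_status=False is
+        # passed explicitly to get the status back instead of raising on an
+        # expected failure.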
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
self.fs.rados(["mksnap", "snap3"], pool=test_pool_name)
base_cmd = f'fs add_data_pool {self.fs.name} {test_pool_name}'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, errno.EOPNOTSUPP)
# cleanup
self.fs.rados(["rmsnap", "snap3"], pool=test_pool_name)
base_cmd = f'osd pool delete {test_pool_name}'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
def test_using_pool_with_snap_fails_fs_creation(self):
"""
Test that using a pool with snaps for fs creation fails
"""
base_cmd = 'osd pool create test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
base_cmd = 'osd pool create test_metadata_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
self.fs.rados(["mksnap", "snap4"], pool='test_data_pool')
base_cmd = 'fs new testfs test_metadata_pool test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, errno.EOPNOTSUPP)
# cleanup
self.fs.rados(["rmsnap", "snap4"], pool='test_data_pool')
base_cmd = 'osd pool delete test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
base_cmd = 'osd pool delete test_metadata_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)