def setup_ec_pools(self, n, metadata=True, overwrites=True):
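# Helper: creates "<n>-meta" (replicated) and "<n>-data" (erasure-coded,
# k=2/m=2, crush-failure-domain=osd); CephFS accepts an EC pool as a data
# pool only when allow_ec_overwrites is enabled on it.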
if metadata:
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-meta", "8")
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-meta", "8")
cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
- self.get_ceph_cmd_stdout(cmd)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+ self.run_ceph_cmd(cmd)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
if overwrites:
- self.get_ceph_cmd_stdout('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+ self.run_ceph_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
@classhook('_add_valid_tell')
class TestValidTell(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'first_data_pool' to 'second_fs'
# Expecting EINVAL exit status because 'first_data_pool' is already in use by 'first_fs'
try:
- self.get_ceph_cmd_stdout('fs', 'add_data_pool', second_fs, first_data_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'second_metadata_pool' to 'first_fs' as a data pool
# Expecting EINVAL exit status because 'second_metadata_pool'
# is already in use by 'second_fs' as a metadata pool
try:
- self.get_ceph_cmd_stdout('fs', 'add_data_pool', first_fs, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
badname = n+'badname@#'
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+metapoolname)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+datapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+metapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+datapoolname)
# test that an fs name containing characters outside "goodchars" fails
args = ['fs', 'new', badname, metapoolname, datapoolname]
check_status=False)
self.assertIn('invalid chars', proc.stderr.getvalue().lower())
- self.get_ceph_cmd_stdout('osd', 'pool', 'rm', metapoolname,
- metapoolname,
- '--yes-i-really-really-mean-it-not-faking')
- self.get_ceph_cmd_stdout('osd', 'pool', 'rm', datapoolname,
- datapoolname,
- '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', metapoolname,
+ metapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', datapoolname,
+ datapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
def test_new_default_ec(self):
"""
n = "test_new_default_ec"
self.setup_ec_pools(n)
try:
- self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
self.mds_cluster.delete_all_filesystems()
n = "test_new_default_ec_force"
self.setup_ec_pools(n)
- self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
def test_new_default_ec_no_overwrite(self):
"""
n = "test_new_default_ec_no_overwrite"
self.setup_ec_pools(n, overwrites=False)
try:
- self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
raise RuntimeError("expected failure")
# and even with --force !
try:
- self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_data_pool = "second_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
# try to create new fs 'second_fs' with the following configuration
# metadata pool -> 'first_metadata_pool'
# Expecting EINVAL exit status because 'first_metadata_pool'
# is already in use by 'first_fs'
try:
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
# try to create new fs 'second_fs' with the following configuration
# metadata pool -> 'second_metadata_pool'
# Expecting EINVAL exit status because 'first_data_pool'
# is already in use by 'first_fs'
try:
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
# Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
# are already in use by 'first_fs'
try:
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
# Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
# are already in use by 'first_fs' and 'second_fs' respectively
try:
- self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
# Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
# are already in use by 'first_fs'
try:
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
- self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
# Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
# are already in use by 'first_fs' and 'second_fs' respectively
try:
- self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
# create pool and initialise with rbd
new_pool = "new_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
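# note: 'rbd pool init' tags new_pool with the 'rbd' application, which is
# what makes the 'fs new' attempt below fail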
new_fs = "new_fs"
new_data_pool = "new_data_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_data_pool)
# try to create new fs 'new_fs' with the following configuration
# metadata pool -> 'new_pool' (already used by rbd app)
# data pool -> 'new_data_pool'
# Expecting EINVAL exit status because 'new_pool' is already in use by the 'rbd' application
try:
- self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_pool, new_data_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
# create pool and initialise with rbd
new_pool = "new_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_metadata_pool = "new_metadata_pool"
- self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_metadata_pool)
# try to create new fs 'new_fs' with the following configuration
# metadata pool -> 'new_metadata_pool'
# data pool -> 'new_pool' (already used by rbd app)
# Expecting EINVAL exit status because 'new_pool' is already in use by the 'rbd' application
try:
- self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_metadata_pool, new_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
MDSS_REQUIRED = 1
def _enable_mirroring(self, fs_name):
- self.get_ceph_cmd_stdout("fs", "mirror", "enable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", fs_name)
def _disable_mirroring(self, fs_name):
- self.get_ceph_cmd_stdout("fs", "mirror", "disable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", fs_name)
def _add_peer(self, fs_name, peer_spec, remote_fs_name):
peer_uuid = str(uuid.uuid4())
- self.get_ceph_cmd_stdout("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
def _remove_peer(self, fs_name, peer_uuid):
- self.get_ceph_cmd_stdout("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.run_ceph_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
def _verify_mirroring(self, fs_name, flag_str):
status = self.fs.status()
# Reset MDS state
self.mount_a.umount_wait(force=True)
self.fs.fail()
- self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+ self.run_ceph_cmd('mds', 'repaired', '0')
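+ # 'mds repaired 0' clears rank 0's damaged flag so the rank can be assigned again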
# Reset RADOS pool state
self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
# Clean up the damagetable entry
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
'tell', f'mds.{self.fs.get_active_names()[0]}',
"damage", "rm", f"{damage_id}")
self.assertEqual(damage[0]['damage_type'], "backtrace")
self.assertEqual(damage[0]['ino'], file1_ino)
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(damage[0]['id']))
self.assertEqual(damage[1]['ino'], file2_ino)
for entry in damage:
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(entry['id']))
self.fs.data_scan(["scan_links"])
# Mark the MDS repaired
- self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+ self.run_ceph_cmd('mds', 'repaired', '0')
# Start the MDS
self.fs.mds_restart()
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
# Kill a standby and check for warning
victim = standbys.pop()
# Set it one greater than standbys ever seen
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
# Set it to 0
- self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
self.wait_for_health_clear(timeout=30)
def test_discontinuous_mdsmap(self):
def setUp(self):
super(TestMultiFilesystems, self).setUp()
- self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
- "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
def _setup_two(self):
fs_a = self.mds_cluster.newfs(name="alpha")
# Kill fs_a's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_a)
- self.get_ceph_cmd_stdout("mds", "fail", original_a)
+ self.run_ceph_cmd("mds", "fail", original_a)
self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_a
# Kill fs_b's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_b)
- self.get_ceph_cmd_stdout("mds", "fail", original_b)
+ self.run_ceph_cmd("mds", "fail", original_b)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_b
target_files = branch_factor**depth * int(split_size * 1.5)
create_files = target_files - files_written
- self.get_ceph_cmd_stdout("log",
+ self.run_ceph_cmd("log",
"{0} Writing {1} files (depth={2})".format(
self.__class__.__name__, create_files, depth
))
self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
create_files)
- self.get_ceph_cmd_stdout("log",
- "{0} Done".format(self.__class__.__name__))
+ self.run_ceph_cmd("log","{0} Done".format(self.__class__.__name__))
files_written += create_files
log.info("Now have {0} files".format(files_written))
# umount mount_b, mount another filesystem on it and use --dumpfs filter
self.mount_b.umount_wait()
- self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
- "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple", "true",
+ "--yes-i-really-mean-it")
# create a new filesystem
fs_b = self.mds_cluster.newfs(name=newfs_name)
self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
# Set and unset a flag to cause OSD epoch to increment
- self.get_ceph_cmd_stdout("osd", "set", "pause")
- self.get_ceph_cmd_stdout("osd", "unset", "pause")
+ self.run_ceph_cmd("osd", "set", "pause")
+ self.run_ceph_cmd("osd", "unset", "pause")
out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
new_epoch = json.loads(out)['epoch']
super(TestQuotaFull, self).setUp()
pool_name = self.fs.get_data_pool_name()
- self.get_ceph_cmd_stdout("osd", "pool", "set-quota", pool_name,
- "max_bytes", f"{self.pool_capacity}")
+ self.run_ceph_cmd("osd", "pool", "set-quota", pool_name,
+ "max_bytes", f"{self.pool_capacity}")
class TestClusterFull(FullnessTestCase):
self.fs.table_tool(["0", "reset", "session"])
self.fs.journal_tool(["journal", "reset"], 0)
self.fs.erase_mds_objects(1)
- self.get_ceph_cmd_stdout('fs', 'reset', self.fs.name,
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'reset', self.fs.name,
+ '--yes-i-really-mean-it')
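# 'fs reset' collapses the MDS map back to a single rank 0 after rank 1's
# objects were erased above, so the filesystem can come back with one rank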
# Bring an MDS back online, mount a client, and see that we can walk the full
# filesystem tree again
invalid_mds_rank = "1,"
# try 'fs perf stats' command with an invalid mds_rank
try:
- self.get_ceph_cmd_stdout("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+ self.run_ceph_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
invalid_client_id = "abcd"
# try 'fs perf stats' command with an invalid client_id
try:
- self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_id", invalid_client_id)
+ self.run_ceph_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
invalid_client_ip = "1.2.3"
# try 'fs perf stats' command with an invalid client_ip
try:
- self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+ self.run_ceph_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
self.mount_b.umount_wait()
self.fs.delete_all_filesystems()
- self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
- "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_a = self._setup_fs(fs_name="fs1")
self.mount_a.umount_wait()
self.mount_b.umount_wait()
- self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
- "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_b = self._setup_fs(fs_name="fs2")
super(TestMirroring, self).tearDown()
def enable_mirroring_module(self):
- self.get_ceph_cmd_stdout("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+ self.run_ceph_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
def disable_mirroring_module(self):
- self.get_ceph_cmd_stdout("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+ self.run_ceph_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
def enable_mirroring(self, fs_name, fs_id):
res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "enable", fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "enable", fs_name)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "disable", fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "disable", fs_name)
time.sleep(10)
# verify via asok
try:
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
if remote_fs_name:
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
else:
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
time.sleep(10)
self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
peer_uuid = self.get_peer_uuid(peer_spec)
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
return outj['token']
def import_peer(self, fs_name, token):
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror",
- "peer_bootstrap", "import", fs_name, token)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_bootstrap",
+ "import", fs_name, token)
def add_directory(self, fs_name, fs_id, dir_name, check_perf_counter=True):
if check_perf_counter:
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
time.sleep(10)
# verify via asok
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
time.sleep(10)
# verify via asok
# try removing peer
try:
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
while proceed():
except:
pass
- self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
# need safe_while since non-failed status pops up as mirroring is restarted
# internally in mirror daemon.
with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
self.assertTrue(res['peers'] == {})
self.assertTrue(res['snap_dirs']['dir_count'] == 0)
- self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
dir_path_p = "/d0/d1"
dir_path = "/d0/d1/d2"
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
time.sleep(10)
# this uses an undocumented interface to get dirpath map state
# there are no mirror daemons
self.assertEqual(res['state'], 'stalled')
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
time.sleep(10)
try:
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
except CommandFailedError as ce:
if ce.exitstatus != errno.ENOENT:
raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
# adding a parent directory should be allowed
- self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
time.sleep(10)
# however, this directory path should get stalled too
self.fs.fail()
- self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'rm', self.fs.name, '--yes-i-really-mean-it')
- self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.get_ceph_cmd_stdout('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
# insert a garbage object
self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
try:
- self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name)
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
raise AssertionError("Expected EINVAL")
- self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name, "--force")
-
- self.get_ceph_cmd_stdout('fs', 'fail', self.fs.name)
-
- self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it'])
-
- self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.get_ceph_cmd_stdout('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
- self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name,
- '--allow_dangerous_metadata_overlay')
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name, "--force")
+
+ self.run_ceph_cmd('fs', 'fail', self.fs.name)
+
+ self.run_ceph_cmd('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
+
+ self.run_ceph_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name,
+ '--allow_dangerous_metadata_overlay')
def test_cap_revoke_nonresponder(self):
"""
def expect_exdev(cmd, mds):
try:
- self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(mds), *cmd)
+ self.run_ceph_cmd('tell', 'mds.{0}'.format(mds), *cmd)
except CommandFailedError as e:
if e.exitstatus == errno.EXDEV:
pass
# recovered/intact
self.fs.rm()
# Recreate file system with pools and previous fscid
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
'fs', 'new', self.fs.name, metadata_pool, data_pool,
'--recover', '--force', '--fscid', f'{self.fs.id}')
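# 'fs new ... --recover' reuses the existing pools and fscid but leaves the
# filesystem non-joinable, hence the explicit set_joinable() below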
self.fs.set_joinable()
recovery_fs.create(recover=True, metadata_overlay=True)
recovery_pool = recovery_fs.get_metadata_pool_name()
- self.get_ceph_cmd_stdout('-s')
+ self.run_ceph_cmd('-s')
# Reset the MDS map in case multiple ranks were in play: recovery procedure
# only understands how to rebuild metadata under rank 0
all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
for d in damage:
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
"damage", "rm", str(d['id']))
return len(damage) > 0
self.assertFalse(self._is_stopped(1))
# Permit the daemon to start purging again
- self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(rank_1_id),
- 'injectargs', "--mds_max_purge_files 100")
+ self.run_ceph_cmd('tell', 'mds.{0}'.format(rank_1_id),
+ 'injectargs', "--mds_max_purge_files 100")
# It should now proceed through shutdown
self.fs.wait_for_daemons(timeout=120)
group = self._gen_subvol_grp_name()
# Create auth_id
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
- "mon", "allow *"]
+ "mon", "allow *"
)
auth_id = "guest1"
self.fail("expected the 'fs subvolume authorize' command to fail")
# clean up
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
group = self._gen_subvol_grp_name()
# Create auth_id
- self.get_ceph_cmd_stdout(
+ self.run_ceph_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
"--group_name", group)
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
guest_mount.umount_wait()
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
new_pool = "new_pool"
self.fs.add_data_pool(new_pool)
- self.get_ceph_cmd_stdout("osd", "pool", "set-quota", new_pool,
- "max_bytes", f"{pool_capacity // 4}")
+ self.run_ceph_cmd("osd", "pool", "set-quota", new_pool,
+ "max_bytes", f"{pool_capacity // 4}")
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)