import logging
import os
import re
+from io import StringIO
from tasks.ceph_test_case import CephTestCase
kwargs['args'] = args
return self.run_ceph_cmd(**kwargs).exitstatus
+ def get_ceph_cmd_stdout(self, *args, **kwargs):
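+ # Accept the command either as positional strings or as a single list/tuple
+ # (unless an explicit 'args' kwarg is given); default 'stdout' to a StringIO
+ # so the command's output can be returned to the caller as a plain string.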
+ if kwargs.get('args') is None and args:
+ if len(args) == 1:
+ args = args[0]
+ kwargs['args'] = args
+ kwargs['stdout'] = kwargs.pop('stdout', StringIO())
+ return self.run_ceph_cmd(**kwargs).stdout.getvalue()
+
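For orientation, a minimal sketch of how the two helpers are meant to be called from a test (illustrative only: the commands are examples drawn from this patch, and run_ceph_cmd() is assumed to come from the RunCephCmd mixin used below):

    # inside a CephFSTestCase-derived test method; json is already imported
    # by these test modules

    # exit status only -- when the test just asserts success or failure
    rc = self.get_ceph_cmd_result('osd', 'down', '0')
    self.assertEqual(rc, 0)

    # captured stdout -- when the command's output has to be parsed
    dump = json.loads(self.get_ceph_cmd_stdout('osd', 'dump', '--format=json'))
    self.assertGreater(dump['epoch'], 0)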
class CephFSTestCase(CephTestCase, RunCephCmd):
"""
except CommandFailedError:
# Fallback for older Ceph clusters
try:
- blocklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blocklist']
+ blocklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blocklist']
log.info(f"Removing {len(blocklist)} blocklist entries")
for addr, blocklisted_at in blocklist.items():
self.run_ceph_cmd("osd", "blocklist", "rm", addr)
except KeyError:
# Fallback for even older Ceph clusters, which use 'blacklist' instead.
- blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blacklist']
+ blacklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blacklist']
log.info(f"Removing {len(blacklist)} blacklist entries")
for addr, blocklisted_at in blacklist.items():
self.run_ceph_cmd("osd", "blacklist", "rm", addr)
"""
Convenience wrapper on "ceph auth ls"
"""
- return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
- "auth", "ls", "--format=json-pretty"
- ))['auth_dump']
+ return json.loads(self.get_ceph_cmd_stdout("auth", "ls",
+ "--format=json-pretty"))['auth_dump']
def assert_session_count(self, expected, ls_data=None, mds_id=None):
if ls_data is None:
MDSS_REQUIRED = 1
def check_pool_application_metadata_key_value(self, pool, app, key, value):
- output = self.fs.mon_manager.raw_cluster_cmd(
+ output = self.get_ceph_cmd_stdout(
'osd', 'pool', 'application', 'get', pool, app, key)
self.assertEqual(str(output.strip()), value)
def setup_ec_pools(self, n, metadata=True, overwrites=True):
if metadata:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-meta", "8")
cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
- self.fs.mon_manager.raw_cluster_cmd(*cmd)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+ self.get_ceph_cmd_stdout(cmd)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
if overwrites:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
@classhook('_add_valid_tell')
class TestValidTell(TestAdminCommands):
That `ceph fs status` command functions.
"""
- s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
+ s = self.get_ceph_cmd_stdout("fs", "status")
self.assertTrue("active" in s)
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json-pretty"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
That the application metadata set on a newly added data pool is as expected.
"""
pool_name = "foo"
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
str(self.fs.pg_num_min))
# Check whether https://tracker.ceph.com/issues/43061 is fixed
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools, and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'first_data_pool' with 'second_fs'
# Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'add_data_pool', second_fs, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools, and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'second_metadata_pool' with 'first_fs' as a data pool
# Expecting EINVAL exit status because 'second_metadata_pool'
# is already in use with 'second_fs' as a metadata pool
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
+ self.get_ceph_cmd_stdout('fs', 'add_data_pool', first_fs, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
badname = n+'badname@#'
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+metapoolname)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+datapoolname)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+metapoolname)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+datapoolname)
# test that fsname not with "goodchars" fails
args = ['fs', 'new', badname, metapoolname, datapoolname]
check_status=False)
self.assertIn('invalid chars', proc.stderr.getvalue().lower())
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
- metapoolname,
- '--yes-i-really-really-mean-it-not-faking')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
- datapoolname,
- '--yes-i-really-really-mean-it-not-faking')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'rm', metapoolname,
+ metapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'rm', datapoolname,
+ datapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
def test_new_default_ec(self):
"""
n = "test_new_default_ec"
self.setup_ec_pools(n)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
self.mds_cluster.delete_all_filesystems()
n = "test_new_default_ec_force"
self.setup_ec_pools(n)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
def test_new_default_ec_no_overwrite(self):
"""
n = "test_new_default_ec_no_overwrite"
self.setup_ec_pools(n, overwrites=False)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
raise RuntimeError("expected failure")
# and even with --force !
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
fs_name = "test_fs_new_pool_application"
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
for p in pool_names:
mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'first_metadata_pool'
# Expecting EINVAL exit status because 'first_metadata_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'second_metadata_pool'
# Expecting EINVAL exit status because 'first_data_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
# Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
# are already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools, and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
# Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
# are already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
# Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
# are already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second metadata and data pools, and a second filesystem using them
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
# Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
# are already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_data_pool = "new_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_data_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_pool' (already used by rbd app)
# data pool -> 'new_data_pool'
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_pool, new_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_metadata_pool = "new_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_metadata_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_metadata_pool'
# data pool -> 'new_pool' (already used by rbd app)
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
+ self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_metadata_pool, new_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
"""
def is_required(index):
- out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
+ out = self.get_ceph_cmd_stdout('fs', 'get', self.fs.name, '--format=json-pretty')
features = json.loads(out)['mdsmap']['required_client_features']
if "feature_{0}".format(index) in features:
return True;
return False;
- features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
+ features = json.loads(self.get_ceph_cmd_stdout('fs', 'feature', 'ls', '--format=json-pretty'))
self.assertGreater(len(features), 0);
for f in features:
names = self.fs.get_rank_names()
for n in names:
- s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
+ s = self.get_ceph_cmd_stdout("config", "show", "mds."+n)
self.assertTrue("NAME" in s)
self.assertTrue("mon_host" in s)
MDSS_REQUIRED = 1
def _enable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "enable", fs_name)
def _disable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "disable", fs_name)
def _add_peer(self, fs_name, peer_spec, remote_fs_name):
peer_uuid = str(uuid.uuid4())
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
def _remove_peer(self, fs_name, peer_uuid):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.get_ceph_cmd_stdout("fs", "mirror", "peer_remove", fs_name, peer_uuid)
def _verify_mirroring(self, fs_name, flag_str):
status = self.fs.status()
# Reset MDS state
self.mount_a.umount_wait(force=True)
self.fs.fail()
- self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+ self.get_ceph_cmd_stdout('mds', 'repaired', '0')
# Reset RADOS pool state
self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
# EIOs mean something handled by DamageTable: assert that it has
# been populated
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), "damage", "ls", '--format=json-pretty'))
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
+ "damage", "ls", '--format=json-pretty'))
if len(damage) == 0:
results[mutation] = EIO_NO_DAMAGE
# The fact that there is damage should have been recorded
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 1)
damage_id = damage[0]['id']
self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
# Clean up the damagetable entry
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
- "damage", "rm", "{did}".format(did=damage_id))
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
+ "damage", "rm", f"{damage_id}")
# Now I should be able to create a file with the same name as the
# damaged guy if I want.
# Check that an entry is created in the damage table
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 1)
self.assertEqual(damage[0]['damage_type'], "backtrace")
self.assertEqual(damage[0]['ino'], file1_ino)
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(damage[0]['id']))
# Check that an entry is created in the damage table
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 2)
self.assertEqual(damage[1]['ino'], file2_ino)
for entry in damage:
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(entry['id']))
self.fs.data_scan(["scan_links"])
# Mark the MDS repaired
- self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+ self.get_ceph_cmd_stdout('mds', 'repaired', '0')
# Start the MDS
self.fs.mds_restart()
file_path = "mydir/myfile_{0}".format(i)
ino = self.mount_a.path_to_ino(file_path)
obj = "{0:x}.{1:08x}".format(ino, 0)
- pgid = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ pgid = json.loads(self.get_ceph_cmd_stdout(
"osd", "map", self.fs.get_data_pool_name(), obj,
"--format=json-pretty"
))['pgid']
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+ self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
# Kill a standby and check for warning
victim = standbys.pop()
# Set it one greater than standbys ever seen
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+ self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
# Set it to 0
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+ self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
self.wait_for_health_clear(timeout=30)
def test_discontinuous_mdsmap(self):
def setUp(self):
super(TestMultiFilesystems, self).setUp()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true",
- "--yes-i-really-mean-it")
+ self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
def _setup_two(self):
fs_a = self.mds_cluster.newfs(name="alpha")
# Kill fs_a's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_a)
- self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
+ self.get_ceph_cmd_stdout("mds", "fail", original_a)
self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_a
# Kill fs_b's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_b)
- self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
+ self.get_ceph_cmd_stdout("mds", "fail", original_b)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_a
target_files = branch_factor**depth * int(split_size * 1.5)
create_files = target_files - files_written
- self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
+ self.get_ceph_cmd_stdout("log",
"{0} Writing {1} files (depth={2})".format(
self.__class__.__name__, create_files, depth
))
self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
create_files)
- self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
+ self.get_ceph_cmd_stdout("log",
"{0} Done".format(self.__class__.__name__))
files_written += create_files
super(TestFSTop, self).tearDown()
def _enable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
def _disable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
def _fstop_dump(self, *args):
return self.mount_a.run_shell(['cephfs-top',
# umount mount_b, mount another filesystem on it and use --dumpfs filter
self.mount_b.umount_wait()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set", "enable_multiple", "true",
- "--yes-i-really-mean-it")
+ self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# create a new filesystem
fs_b = self.mds_cluster.newfs(name=newfs_name)
self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
# Set and unset a flag to cause OSD epoch to increment
- self.fs.mon_manager.raw_cluster_cmd("osd", "set", "pause")
- self.fs.mon_manager.raw_cluster_cmd("osd", "unset", "pause")
+ self.get_ceph_cmd_stdout("osd", "set", "pause")
+ self.get_ceph_cmd_stdout("osd", "unset", "pause")
- out = self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip()
+ out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
new_epoch = json.loads(out)['epoch']
self.assertNotEqual(self.initial_osd_epoch, new_epoch)
# Wait for the MDS to see the latest OSD map so that it will reliably
# be applying the policy of rejecting non-deletion metadata operations
# while in the full state.
- osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+ osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
self.wait_until_true(
lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
timeout=10)
# Wait for the MDS to see the latest OSD map so that it will reliably
# be applying the free space policy
- osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+ osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
self.wait_until_true(
lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
timeout=10)
super(TestQuotaFull, self).setUp()
pool_name = self.fs.get_data_pool_name()
- self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", pool_name,
- "max_bytes", "{0}".format(self.pool_capacity))
+ self.get_ceph_cmd_stdout("osd", "pool", "set-quota", pool_name,
+ "max_bytes", f"{self.pool_capacity}")
class TestClusterFull(FullnessTestCase):
self.fs.table_tool(["0", "reset", "session"])
self.fs.journal_tool(["journal", "reset"], 0)
self.fs.erase_mds_objects(1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
- '--yes-i-really-mean-it')
+ self.get_ceph_cmd_stdout('fs', 'reset', self.fs.name,
+ '--yes-i-really-mean-it')
# Bring an MDS back online, mount a client, and see that we can walk the full
# filesystem tree again
expect = " : (110) Connection timed out"
# kill the OSDs so that the balancer pull from RADOS times out
- osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
+ osd_map = json.loads(self.get_ceph_cmd_stdout('osd', 'dump', '--format=json-pretty'))
for i in range(0, len(osd_map['osds'])):
self.get_ceph_cmd_result('osd', 'down', str(i))
self.get_ceph_cmd_result('osd', 'out', str(i))
return verify_metrics_cbk
def _fs_perf_stats(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", *args)
+ return self.get_ceph_cmd_stdout("fs", "perf", "stats", *args)
def _enable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
def _disable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
def _spread_directory_on_all_ranks(self, fscid):
fs_status = self.fs.status()
invalid_mds_rank = "1,"
# try, 'fs perf stat' command with invalid mds_rank
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+ self.get_ceph_cmd_stdout("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
invalid_client_id = "abcd"
# try, 'fs perf stat' command with invalid client_id
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
+ self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_id", invalid_client_id)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
invalid_client_ip = "1.2.3"
# try, 'fs perf stat' command with invalid client_ip
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+ self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_ip", invalid_client_ip)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
self.mount_b.umount_wait()
self.fs.delete_all_filesystems()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true", "--yes-i-really-mean-it")
+ self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_a = self._setup_fs(fs_name="fs1")
self.mount_a.umount_wait()
self.mount_b.umount_wait()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true", "--yes-i-really-mean-it")
+ self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_b = self._setup_fs(fs_name="fs2")
super(TestMirroring, self).tearDown()
def enable_mirroring_module(self):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+ self.get_ceph_cmd_stdout("mgr", "module", "enable", TestMirroring.MODULE_NAME)
def disable_mirroring_module(self):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+ self.get_ceph_cmd_stdout("mgr", "module", "disable", TestMirroring.MODULE_NAME)
def enable_mirroring(self, fs_name, fs_id):
res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "enable", fs_name)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "enable", fs_name)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "disable", fs_name)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "disable", fs_name)
time.sleep(10)
# verify via asok
try:
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
if remote_fs_name:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
else:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
time.sleep(10)
self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
peer_uuid = self.get_peer_uuid(peer_spec)
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
self.assertLess(vafter["counters"]["mirroring_peers"], vbefore["counters"]["mirroring_peers"])
def bootstrap_peer(self, fs_name, client_name, site_name):
- outj = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
- "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name, client_name, site_name))
+ outj = json.loads(self.get_ceph_cmd_stdout(
+ "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name,
+ client_name, site_name))
return outj['token']
def import_peer(self, fs_name, token):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_bootstrap", "import",
- fs_name, token)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror",
+ "peer_bootstrap", "import", fs_name, token)
def add_directory(self, fs_name, fs_id, dir_name, check_perf_counter=True):
if check_perf_counter:
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", fs_name, dir_name)
time.sleep(10)
# verify via asok
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
time.sleep(10)
# verify via asok
return json.loads(res)
def get_mirror_daemon_status(self):
- daemon_status = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "daemon", "status"))
+ daemon_status = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "daemon", "status"))
log.debug(f'daemon_status: {daemon_status}')
# running a single mirror daemon is supported
status = daemon_status[0]
# try removing peer
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
while proceed():
except:
pass
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
# need safe_while since non-failed status pops up as mirroring is restarted
# internally in mirror daemon.
with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
self.assertTrue(res['peers'] == {})
self.assertTrue(res['snap_dirs']['dir_count'] == 0)
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+ self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
# verify via peer_list interface
peer_uuid = self.get_peer_uuid("client.mirror_peer_bootstrap@site-remote")
- res = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
+ res = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
self.assertTrue(peer_uuid in res)
# remove peer
dir_path_p = "/d0/d1"
dir_path = "/d0/d1/d2"
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
time.sleep(10)
# this uses an undocumented interface to get dirpath map state
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
res = json.loads(res_json)
# there are no mirror daemons
self.assertTrue(res['state'], 'stalled')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
time.sleep(10)
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
except CommandFailedError as ce:
if ce.exitstatus != errno.ENOENT:
raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
# adding a parent directory should be allowed
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+ self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
time.sleep(10)
# however, this directory path should get stalled too
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
res = json.loads(res_json)
# there are no mirror daemons
self.assertTrue(res['state'], 'stalled')
# wait for restart mirror on blocklist
time.sleep(60)
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
res = json.loads(res_json)
# there are no mirror daemons
self.assertTrue(res['state'], 'mapped')
self.fs.fail()
- self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it')
+ self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
+ self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
# insert a garbage object
self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name)
+ self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
raise AssertionError("Expected EINVAL")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name, "--force")
+ self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name, "--force")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)
+ self.get_ceph_cmd_stdout('fs', 'fail', self.fs.name)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it')
+ self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name,
- '--allow_dangerous_metadata_overlay')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.get_ceph_cmd_stdout('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
+ self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name,
+ '--allow_dangerous_metadata_overlay')
def test_cap_revoke_nonresponder(self):
"""
pool_name = self.fs.get_data_pool_name()
raw_df = self.fs.get_pool_df(pool_name)
raw_avail = float(raw_df["max_avail"])
- out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
- pool_name, 'size',
- '-f', 'json-pretty')
+ out = self.get_ceph_cmd_stdout('osd', 'pool', 'get', pool_name,
+ 'size', '-f', 'json-pretty')
_ = json.loads(out)
proc = self.mount_a.run_shell(['df', '.'])
self.fs.set_allow_new_snaps(False)
self.fs.set_allow_standby_replay(True)
- lsflags = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'lsflags',
- self.fs.name,
- "--format=json-pretty"))
+ lsflags = json.loads(self.get_ceph_cmd_stdout(
+ 'fs', 'lsflags', self.fs.name, "--format=json-pretty"))
self.assertEqual(lsflags["joinable"], False)
self.assertEqual(lsflags["allow_snaps"], False)
self.assertEqual(lsflags["allow_multimds_snaps"], True)
self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
- mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+ mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
self.fs.mds_asok(['config', 'set', 'mds_heartbeat_grace', '1'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
- mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+ mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
def expect_exdev(cmd, mds):
try:
- self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(mds), *cmd)
+ self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(mds), *cmd)
except CommandFailedError as e:
if e.exitstatus == errno.EXDEV:
pass
# TODO Add test for cluster update when ganesha can be deployed on multiple ports.
class TestNFS(MgrTestCase):
def _cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+ return self.get_ceph_cmd_stdout(args)
def _nfs_cmd(self, *args):
return self._cmd("nfs", *args)
# recovered/intact
self.fs.rm()
# Recreate file system with pool and previous fscid
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'fs', 'new', self.fs.name, metadata_pool, data_pool,
'--recover', '--force', '--fscid', f'{self.fs.id}')
self.fs.set_joinable()
recovery_fs.create(recover=True, metadata_overlay=True)
recovery_pool = recovery_fs.get_metadata_pool_name()
- recovery_fs.mon_manager.raw_cluster_cmd('-s')
+ self.get_ceph_cmd_stdout('-s')
# Reset the MDS map in case multiple ranks were in play: recovery procedure
# only understands how to rebuild metadata under rank 0
all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
for d in damage:
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[mds_rank]),
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
"damage", "rm", str(d['id']))
return len(damage) > 0
if mon_caps is None:
mon_caps = "allow r"
- out = self.fs.mon_manager.raw_cluster_cmd(
+ out = self.get_ceph_cmd_stdout(
"auth", "get-or-create", "client.{name}".format(name=id_name),
"mds", mds_caps,
"osd", osd_caps,
self.assertTrue((delta <= timo + 5) and (delta >= timo - 5))
def _fs_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+ return self.get_ceph_cmd_stdout("fs", *args)
def fs_snap_schedule_cmd(self, *args, **kwargs):
if 'fs' in kwargs:
self.volname = result[0]['name']
def _enable_snap_schedule(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "snap_schedule")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "snap_schedule")
def _disable_snap_schedule(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "snap_schedule")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "snap_schedule")
def _allow_minute_granularity_snapshots(self):
self.config_set('mgr', 'mgr/snap_schedule/allow_m_granularity', True)
self.assertFalse(self._is_stopped(1))
# Permit the daemon to start purging again
- self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(rank_1_id),
- 'injectargs',
- "--mds_max_purge_files 100")
+ self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(rank_1_id),
+ 'injectargs', "--mds_max_purge_files 100")
# It should now proceed through shutdown
self.fs.wait_for_daemons(timeout=120)
:param pool_name: Which pool (must exist)
"""
- out = self.fs.mon_manager.raw_cluster_cmd("df", "--format=json-pretty")
+ out = self.get_ceph_cmd_stdout("df", "--format=json-pretty")
for p in json.loads(out)['pools']:
if p['name'] == pool_name:
return p['stats']
DEFAULT_NUMBER_OF_FILES = 1024
def _fs_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+ return self.get_ceph_cmd_stdout("fs", *args)
def _raw_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+ return self.get_ceph_cmd_stdout(args)
def __check_clone_state(self, state, clone, clone_group=None, timo=120):
check = 0
# Create auth_id
authid = "client.guest1"
- user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ user = json.loads(self.get_ceph_cmd_stdout(
"auth", "get-or-create", authid,
"mds", "allow rw path=/volumes",
"mgr", "allow rw",
# Create auth_id
authid = "client.guest1"
- user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ user = json.loads(self.get_ceph_cmd_stdout(
"auth", "get-or-create", authid,
"mds", f"allow rw path={mount_path}",
"mgr", "allow rw",
group = self._gen_subvol_grp_name()
# Create auth_id
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
- "mon", "allow *"
+ "mon", "allow *"]
)
auth_id = "guest1"
self.fail("expected the 'fs subvolume authorize' command to fail")
# clean up
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
group = self._gen_subvol_grp_name()
# Create auth_id
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
"--group_name", group)
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
"--group_name", group).rstrip()
# Update caps for guestclient_1 out of band
- out = self.fs.mon_manager.raw_cluster_cmd(
+ out = self.get_ceph_cmd_stdout(
"auth", "caps", "client.guest1",
"mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
"osd", "allow rw pool=cephfs_data",
# Validate the caps of guestclient_1 after deauthorize. It should not have deleted
# guestclient_1. The mgr and mds caps should be present which was updated out of band.
- out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
+ out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.guest1", "--format=json-pretty"))
self.assertEqual("client.guest1", out[0]["entity"])
self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
self.assertNotIn("osd", out[0]["caps"])
# clean up
- out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ out = self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
new_pool = "new_pool"
self.fs.add_data_pool(new_pool)
- self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
- "max_bytes", "{0}".format(pool_capacity // 4))
+ self.get_ceph_cmd_stdout("osd", "pool", "set-quota", new_pool,
+ "max_bytes", f"{pool_capacity // 4}")
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
# Validate that the mds path added is of subvol1 and not of subvol2
- out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
+ out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.alice", "--format=json-pretty"))
self.assertEqual("client.alice", out[0]["entity"])
self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
import configparser
cp = configparser.ConfigParser()
- cp.read_string(self.fs.mon_manager.raw_cluster_cmd(
- 'auth', 'get-or-create', 'client.admin'))
+ cp.read_string(self.get_ceph_cmd_stdout('auth', 'get-or-create',
+ 'client.admin'))
return cp['client.admin']['key']