if mdss.remotes:
log.info('Setting up CephFS filesystem...')
- Filesystem(ctx, create='cephfs')
+ fs = Filesystem(ctx, create='cephfs')
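+ # Roles count as active MDSs unless their names carry a standby suffix
+ # ('-s' ending or '-s-' infix), as the lambda below checks.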
is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
num_active = len([r for r in all_roles if is_active_mds(r)])
- mon_remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph', 'mds', 'set', 'allow_multimds', 'true',
- '--yes-i-really-mean-it'],
- check_status=False, # probably old version, upgrade test
- )
- mon_remote.run(args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph',
- '--cluster', cluster_name,
- 'mds', 'set_max_mds', str(num_active)])
- mon_remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph', 'mds', 'set', 'allow_dirfrags', 'true',
- '--yes-i-really-mean-it'],
- check_status=False, # probably old version, upgrade test
- )
+
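+ # Enable multiple active MDS daemons, size the cluster to the number of
+ # active MDS roles counted above, and allow directory fragmentation.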
+ fs.set_allow_multimds(True)
+ fs.set_max_mds(num_active)
+ fs.set_allow_dirfrags(True)
yield
def set_max_mds(self, max_mds):
self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)
+ def set_allow_dirfrags(self, yes):
+ self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
+
+ def set_allow_multimds(self, yes):
+ self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_multimds", str(yes).lower(), '--yes-i-really-mean-it')
+
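+ # Assumed companion helper (a sketch, not shown in this excerpt): the tests
+ # below call fs.deactivate(1); this mirrors the raw
+ # 'ceph mds deactivate <fs_name>:<rank>' command those calls replace.
+ def deactivate(self, rank):
+ self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%s:%d" % (self.name, rank))
+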
def get_pgs_per_fs_pool(self):
"""
Calculate how many PGs to use when creating a pool, in order to avoid raising any
That when injecting a dentry into a fragmented directory, we put it in the right fragment.
"""
- self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true",
- "--yes-i-really-mean-it")
+ self.fs.set_allow_dirfrags(True)
file_count = 100
file_names = ["%s" % n for n in range(0, file_count)]
# Create FS alpha and get mds_a to come up as active
fs_a = self.mds_cluster.newfs("alpha")
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
- 'allow_multimds', "true",
- "--yes-i-really-mean-it")
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2")
+ fs_a.set_allow_multimds(True)
+ fs_a.set_max_mds(2)
self.mds_cluster.mds_restart(mds_a)
self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30)
self.assertEqual(info_a_s['state'], "up:standby-replay")
# Shrink the cluster
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "1")
+ fs_a.set_max_mds(1)
fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
self.wait_until_equal(
lambda: fs_a.get_active_names(), [mds_a],
def test_grow_shrink(self):
# Usual setup...
fs_a, fs_b = self._setup_two()
- fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
- "allow_multimds", "true",
- "--yes-i-really-mean-it")
-
- fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
- "allow_multimds", "true",
- "--yes-i-really-mean-it")
+ fs_a.set_allow_multimds(True)
+ fs_b.set_allow_multimds(True)
# Increase max_mds on fs_b, see a standby take up the role
- fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "2")
+ fs_b.set_max_mds(2)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
reject_fn=lambda v: v > 2 or v < 1)
# Increase max_mds on fs_a, see a standby take up the role
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2")
+ fs_a.set_max_mds(2)
self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
reject_fn=lambda v: v > 2 or v < 1)
# Shrink fs_b back to 1, see a daemon go back to standby
- fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "1")
- fs_b.mon_manager.raw_cluster_cmd('mds', 'deactivate', "{0}:1".format(fs_b.name))
+ fs_b.set_max_mds(1)
+ fs_b.deactivate(1)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
reject_fn=lambda v: v > 2 or v < 1)
# Grow fs_a up to 3, see the former fs_b daemon join it.
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "3")
+ fs_a.set_max_mds(3)
self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
reject_fn=lambda v: v > 3 or v < 2)
# Create two filesystems which should have two ranks each
fs_a = self.mds_cluster.newfs("alpha")
- fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
- "allow_multimds", "true",
- "--yes-i-really-mean-it")
+ fs_a.set_allow_multimds(True)
fs_b = self.mds_cluster.newfs("bravo")
- fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
- "allow_multimds", "true",
- "--yes-i-really-mean-it")
-
- fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
- 'max_mds', "2")
- fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name,
- 'max_mds', "2")
+ fs_b.set_allow_multimds(True)
+
+ fs_a.set_max_mds(2)
+ fs_b.set_max_mds(2)
# Set all the daemons to have a FSCID assignment but no other
# standby preferences.
for k, v in kwargs.items():
self.ceph_cluster.set_ceph_conf("mds", k, v.__str__())
- self.fs.mon_manager.raw_cluster_cmd("fs", "set", self.fs.name,
- "allow_dirfrags", "true",
- "--yes-i-really-mean-it")
+ self.fs.set_allow_dirfrags(True)
self.mds_cluster.mds_fail_restart()
self.fs.wait_for_daemons()
"""
# Set max_mds to 2
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
- "true", "--yes-i-really-mean-it")
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+ self.fs.set_allow_multimds(True)
+ self.fs.set_max_mds(2)
# See that we have two active MDSs
self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
class TestMantle(CephFSTestCase):
def start_mantle(self):
self.wait_for_health_clear(timeout=30)
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
- "true", "--yes-i-really-mean-it")
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+ self.fs.set_allow_multimds(True)
+ self.fs.set_max_mds(2)
self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
reject_fn=lambda v: v > 2 or v < 1)
self.fs.wait_for_daemons()
# I would like two MDSs, so that I can do an export dir later
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
- "true", "--yes-i-really-mean-it")
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+ self.fs.set_allow_multimds(True)
+ self.fs.set_max_mds(2)
self.fs.wait_for_daemons()
active_mds_names = self.fs.get_active_names()
"""
# Set up two MDSs
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
- "true", "--yes-i-really-mean-it")
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+ self.fs.set_allow_multimds(True)
+ self.fs.set_max_mds(2)
# See that we have two active MDSs
self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024))
# Shut down rank 1
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
- self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
+ self.fs.set_max_mds(1)
+ self.fs.deactivate(1)
# Wait until we get to a single active MDS mdsmap state
def is_stopped():
That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged.
"""
- self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true", "--yes-i-really-mean-it")
+ self.fs.set_allow_dirfrags(True)
LOW_LIMIT = 50
for mds in self.fs.get_daemon_names():