raise RuntimeError("cannot specify fscid when configuring overlay")
self.metadata_overlay = overlay
- def deactivate(self, rank):
- if rank < 0:
- raise RuntimeError("invalid rank")
- elif rank == 0:
- raise RuntimeError("cannot deactivate rank 0")
- self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank))
-
def set_var(self, var, *args):
    """Set a filesystem setting via `ceph fs set <fsname> <var> ...`.

    Any extra positional arguments are stringified before being
    forwarded to the mon command.
    """
    str_args = [str(arg) for arg in args]
    self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *str_args)
# Shrink the cluster
fs_a.set_max_mds(1)
- fs_a.mon_manager.raw_cluster_cmd("mds", "deactivate", "{0}:1".format(fs_a.name))
self.wait_until_equal(
lambda: fs_a.get_active_names(), [mds_a],
60
# Shrink fs_b back to 1, see a daemon go back to standby
fs_b.set_max_mds(1)
- fs_b.deactivate(1)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
reject_fn=lambda v: v > 2 or v < 1)
# Shut down rank 1
self.fs.set_max_mds(1)
- self.fs.deactivate(1)
# It shouldn't proceed past stopping because it's still not allowed
# to purge
"--mds_max_purge_files 100")
# It should now proceed through shutdown
- self.wait_until_true(
- lambda: self._is_stopped(1),
- timeout=60
- )
+ self.fs.wait_for_daemons(timeout=120)
# ...and in the process purge all that data
self.await_data_pool_empty()
# Shut down rank 1
self.fs.set_max_mds(1)
- self.fs.deactivate(1)
-
- # Wait til we get to a single active MDS mdsmap state
- self.wait_until_true(lambda: self._is_stopped(1), timeout=120)
+ self.fs.wait_for_daemons(timeout=120)
# See that the stray counter on rank 0 has incremented
self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1)