From 3d3b095bb13b4cc519f51d74b6b42f89aede93ba Mon Sep 17 00:00:00 2001
From: John Spray
Date: Sat, 7 Feb 2015 09:33:58 +0000
Subject: [PATCH] tasks: lots of s/mds_restart/mds_fail_restart/

Wherever we are subsequently waiting for daemons to be healthy, we
should be doing a fail during the restart.  Also catch some places
that were doing this longhand and use the handy fail_restart version
instead.

Signed-off-by: John Spray
---
 tasks/mds_auto_repair.py     |  4 ++--
 tasks/mds_client_limits.py   |  2 +-
 tasks/mds_client_recovery.py | 12 +++---------
 tasks/mds_journal_repair.py  |  4 ++--
 4 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/tasks/mds_auto_repair.py b/tasks/mds_auto_repair.py
index e7580613b2a1a..a95b368318657 100644
--- a/tasks/mds_auto_repair.py
+++ b/tasks/mds_auto_repair.py
@@ -30,7 +30,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         # trim log segment as fast as possible
         self.set_conf('mds', 'mds cache size', 100)
         self.set_conf('mds', 'mds verify backtrace', 1)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         create_script = "mkdir {0}; for i in `seq 0 500`; do touch {0}/file$i; done"
@@ -98,7 +98,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         self.assertTrue(writer.finished)
 
         # restart mds to make it writable
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
diff --git a/tasks/mds_client_limits.py b/tasks/mds_client_limits.py
index ae72288675342..ff91d98236ae9 100644
--- a/tasks/mds_client_limits.py
+++ b/tasks/mds_client_limits.py
@@ -63,7 +63,7 @@ class TestClientLimits(CephFSTestCase):
         open_files = 250
 
         self.set_conf('mds', 'mds cache size', cache_size)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         mount_a_client_id = self.mount_a.get_global_id()
diff --git a/tasks/mds_client_recovery.py b/tasks/mds_client_recovery.py
index 0b986f4d4da9e..4873b7f77c337 100644
--- a/tasks/mds_client_recovery.py
+++ b/tasks/mds_client_recovery.py
@@ -57,9 +57,7 @@ class TestClientRecovery(CephFSTestCase):
         # Check that after an MDS restart both clients reconnect and continue
         # to handle I/O
         # =====================================================
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         self.mount_a.create_destroy()
@@ -254,9 +252,7 @@ class TestClientRecovery(CephFSTestCase):
         self.assertGreaterEqual(num_caps, count)
 
         # Restart MDS. client should trim its cache when reconnecting to the MDS
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         num_caps = self._session_num_caps(client_id)
@@ -320,9 +316,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_b.wait_for_visible("background_file-2")
         self.mount_b.check_filelock()
 
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         self.mount_b.check_filelock()
diff --git a/tasks/mds_journal_repair.py b/tasks/mds_journal_repair.py
index 62ed0534d890c..f08f464e1869b 100644
--- a/tasks/mds_journal_repair.py
+++ b/tasks/mds_journal_repair.py
@@ -85,7 +85,7 @@ class TestJournalRepair(CephFSTestCase):
         # Now check the MDS can read what we wrote: truncate the journal
         # and start the mds.
         self.fs.journal_tool(['journal', 'reset'])
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         # List files
@@ -280,7 +280,7 @@ class TestJournalRepair(CephFSTestCase):
 
         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
-        self.fs.mds_restart(active_mds_names[0])
+        self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
         self.mount_a.mount()
-- 
2.39.5
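
Note: the mds_fail_restart() helper these call sites switch to lives in the
CephFS Filesystem wrapper (assumed here to be tasks/cephfs/filesystem.py).
Below is only a rough sketch, inferred from the longhand sequence removed in
mds_client_recovery.py, of what the helper is assumed to do: stop the daemon,
mark its rank failed so the monitor does not wait out the beacon grace period,
then start the daemon again. The real method may differ in name handling and
details.

    # Assumed sketch only; not the actual implementation from this repository.
    def mds_fail_restart(self, mds_id=None):
        self.mds_stop(mds_id)     # stop the ceph-mds daemon process
        self.mds_fail(mds_id)     # mark the rank failed ("ceph mds fail") so the
                                  # mon reassigns it immediately instead of waiting
                                  # for the grace period to expire
        self.mds_restart(mds_id)  # start the daemon again; the tests still call
                                  # wait_for_daemons()/wait_for_state() afterwards

Doing the explicit fail between stop and restart is what lets the subsequent
wait_for_daemons()/wait_for_state() calls in these tests converge promptly.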