git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks/cephfs: rename Filesystem.reset to recreate
author    John Spray <john.spray@redhat.com>
Fri, 8 Jan 2016 17:59:35 +0000 (17:59 +0000)
committer John Spray <john.spray@redhat.com>
Fri, 11 Mar 2016 10:36:43 +0000 (10:36 +0000)
Name was ambiguous wrt "fs reset"

Signed-off-by: John Spray <john.spray@redhat.com>
tasks/cephfs/filesystem.py
tasks/cephfs/test_data_scan.py
tasks/cephfs/test_journal_migration.py
tasks/cephfs/test_journal_repair.py
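
The rename avoids confusion with the "fs reset" monitor command, which some of
these tests invoke directly. The two operations differ: the Python helper (now
recreate()) removes the filesystem and creates a fresh one, while "fs reset"
keeps the existing filesystem and only resets its MDS map. A minimal
illustrative sketch, assuming a Filesystem instance `fs` wired up as in
tasks/cephfs/filesystem.py (hypothetical usage, not part of this patch):

    # Illustration only -- contrast of the two operations this rename disambiguates.
    fs.recreate()                        # 'fs rm' + 'fs new': wipe and recreate the filesystem
    fs.mon_manager.raw_cluster_cmd(      # 'fs reset': keep the filesystem, reset its MDS map
        'fs', 'reset', fs.name, '--yes-i-really-mean-it')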

diff --git a/tasks/cephfs/filesystem.py b/tasks/cephfs/filesystem.py
index c696a8b60ffff5e34d094eeb6fc449f244691ad6..c0ff44ab3482b9d82a82c668b4daaa25c3842ee2 100644
--- a/tasks/cephfs/filesystem.py
+++ b/tasks/cephfs/filesystem.py
@@ -242,6 +242,8 @@ class MDSCluster(object):
             if mds_info['name'] == mds_id:
                 return mds_info
 
+        return None
+
 
 class Filesystem(MDSCluster):
     """
@@ -458,7 +460,7 @@ class Filesystem(MDSCluster):
         else:
             return self.mds_ids[0]
 
-    def reset(self):
+    def recreate(self):
         log.info("Creating new filesystem")
 
         self.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "0")
@@ -466,8 +468,9 @@ class Filesystem(MDSCluster):
         for mds_id in self.mds_ids:
             assert not self.mds_daemons[mds_id].running()
             self.mon_manager.raw_cluster_cmd_result('mds', 'fail', mds_id)
-        self.mon_manager.raw_cluster_cmd_result('fs', 'rm', "default", "--yes-i-really-mean-it")
-        self.mon_manager.raw_cluster_cmd_result('fs', 'new', "default", "metadata", "data")
+        self.mon_manager.raw_cluster_cmd_result('fs', 'rm', self.name, "--yes-i-really-mean-it")
+        self.mon_manager.raw_cluster_cmd_result('fs', 'new', self.name,
+                self.metadata_pool_name, self.data_pool_name)
 
     def get_metadata_object(self, object_type, object_id):
         """
diff --git a/tasks/cephfs/test_data_scan.py b/tasks/cephfs/test_data_scan.py
index e92447434a7a9373ecbe5bb46d9415530df85337..add807c987746832c81c9997979edf17c4f210bd 100644
--- a/tasks/cephfs/test_data_scan.py
+++ b/tasks/cephfs/test_data_scan.py
@@ -338,7 +338,8 @@ class TestDataScan(CephFSTestCase):
 
         # Reset the MDS map in case multiple ranks were in play: recovery procedure
         # only understands how to rebuild metadata under rank 0
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', 'default', '--yes-i-really-mean-it')
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
+                '--yes-i-really-mean-it')
 
         # Attempt to start an MDS, see that it goes into damaged state
         self.fs.mds_restart()
diff --git a/tasks/cephfs/test_journal_migration.py b/tasks/cephfs/test_journal_migration.py
index 6e5ae39eb6df6f06fe6889bd35d1065570d2896f..873603f7ab137d0e671c147e4a532fa52bf4a3b7 100644
--- a/tasks/cephfs/test_journal_migration.py
+++ b/tasks/cephfs/test_journal_migration.py
@@ -19,7 +19,7 @@ class TestJournalMigration(CephFSTestCase):
         # Create a filesystem using the older journal format.
         self.mount_a.umount_wait()
         self.fs.mds_stop()
-        self.fs.reset()
+        self.fs.recreate()
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
 
diff --git a/tasks/cephfs/test_journal_repair.py b/tasks/cephfs/test_journal_repair.py
index 2577701043b4f3fb10c5a34d664d14879fa98e62..a07bb0befe842d5dfd771fb15ed1c673426c5e1f 100644
--- a/tasks/cephfs/test_journal_repair.py
+++ b/tasks/cephfs/test_journal_repair.py
@@ -242,7 +242,10 @@ class TestJournalRepair(CephFSTestCase):
 
         self.wait_until_true(is_marked_damaged, 60)
 
-        self.fs.wait_for_state("up:standby", timeout=60, mds_id=damaged_id)
+        self.wait_until_equal(
+                lambda: self.mds_cluster.get_mds_info(mds_id)['state'],
+                "up:standby",
+                timeout=60)
 
         self.fs.mds_stop(damaged_id)
         self.fs.mds_fail(damaged_id)
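
For readers unfamiliar with the test helpers: wait_until_equal polls a callable
until it returns the expected value or the timeout expires, which is why the
explicit `return None` added to get_mds_info above matters to callers. A rough
illustrative sketch of that polling pattern (assumed behaviour only, not the
actual teuthology implementation):

    import time

    def wait_until_equal(get_value, expected, timeout, interval=5):
        # Poll get_value() until it matches `expected`; raise if `timeout` is exceeded.
        elapsed = 0
        while True:
            current = get_value()
            if current == expected:
                return
            if elapsed >= timeout:
                raise RuntimeError("Timed out after {0}s waiting for {1} "
                                   "(last value: {2})".format(timeout, expected, current))
            time.sleep(interval)
            elapsed += interval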