test: optionally create a backup filesystem on startup
author Venky Shankar <vshankar@redhat.com>
Tue, 18 Aug 2020 09:32:38 +0000 (05:32 -0400)
committer Venky Shankar <vshankar@redhat.com>
Tue, 19 Jan 2021 06:08:10 +0000 (01:08 -0500)
Also filter out client IDs starting with "mirror" when
cleaning up leftover auth IDs, since teuthology is
configured to create the client.mirror and client.mirror_remote
clients before executing mirroring tests.

Signed-off-by: Venky Shankar <vshankar@redhat.com>
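
For context, the mirror clients mentioned above would be pre-provisioned before
the test run with something along these lines (the capability strings below are
placeholders, not taken from the actual teuthology configuration):

    ceph auth get-or-create client.mirror mon 'allow r' mds 'allow rwps' osd 'allow rw'
    ceph auth get-or-create client.mirror_remote mon 'allow r' mds 'allow rwps' osd 'allow rw'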
qa/tasks/ceph_test_case.py
qa/tasks/cephfs/cephfs_test_case.py

index 0958caf2fa40602ffe53de4986a69b2a65afe1b0..23cf4839f63f4ef6cb20a1c9c041a2ad5ed6cceb 100644 (file)
@@ -20,6 +20,7 @@ class CephTestCase(unittest.TestCase):
     mounts = None
     fs = None
     recovery_fs = None
+    backup_fs = None
     ceph_cluster = None
     mds_cluster = None
     mgr_cluster = None
index 3c5a23d3ce7330b18fe37788aca8d5b7e0c2ceb8..9d689456c9de4a5521aed15e642b6ea5f1e23fa8 100644 (file)
@@ -80,6 +80,10 @@ class CephFSTestCase(CephTestCase):
     # requires REQUIRE_FILESYSTEM = True
     REQUIRE_RECOVERY_FILESYSTEM = False
 
+    # create a backup filesystem if required.
+    # requires REQUIRE_FILESYSTEM = True
+    REQUIRE_BACKUP_FILESYSTEM = False
+
     LOAD_SETTINGS = [] # type: ignore
 
     def _save_mount_details(self):
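
As a usage sketch, a mirroring test would opt in by setting the new flag on its
test class (the class below is hypothetical; REQUIRE_FILESYSTEM already defaults
to True in CephFSTestCase):

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestMirroring(CephFSTestCase):
        # opting in makes setUp() create self.backup_fs alongside self.fs
        REQUIRE_BACKUP_FILESYSTEM = True

        def test_backup_fs_created(self):
            # both filesystems are available once setUp() completes
            self.assertIsNotNone(self.backup_fs)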
@@ -127,6 +131,7 @@ class CephFSTestCase(CephTestCase):
         self.mds_cluster.delete_all_filesystems()
         self.mds_cluster.mds_restart() # to reset any run-time configs, etc.
         self.fs = None # is now invalid!
+        self.backup_fs = None
         self.recovery_fs = None
 
         # In case anything is in the OSD blocklist list, clear it out.  This is to avoid
@@ -146,7 +151,7 @@ class CephFSTestCase(CephTestCase):
         # test, delete them
         for entry in self.auth_list():
             ent_type, ent_id = entry['entity'].split(".")
-            if ent_type == "client" and ent_id not in client_mount_ids and ent_id != "admin":
+            if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id.startswith("mirror")):
                 self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
 
         if self.REQUIRE_FILESYSTEM:
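
For illustration only, the retention rule above reduces to this standalone
predicate (mounted clients are excluded separately via client_mount_ids):

    def is_protected(ent_id):
        # IDs the cleanup must never delete: the admin key and the
        # pre-provisioned mirror/mirror_remote clients
        return ent_id == "admin" or ent_id.startswith("mirror")

    assert is_protected("admin")
    assert is_protected("mirror_remote")
    assert not is_protected("guest")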
@@ -172,6 +177,15 @@ class CephFSTestCase(CephTestCase):
             for i in range(0, self.CLIENTS_REQUIRED):
                 self.mounts[i].mount_wait()
 
+        if self.REQUIRE_BACKUP_FILESYSTEM:
+            if not self.REQUIRE_FILESYSTEM:
+                self.skipTest("Backup filesystem requires a primary filesystem as well")
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
+                                                'enable_multiple', 'true',
+                                                '--yes-i-really-mean-it')
+            self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
+            self.backup_fs.wait_for_daemons()
+
         if self.REQUIRE_RECOVERY_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
                 self.skipTest("Recovery filesystem requires a primary filesystem as well")