qa: add tests for adding EC data pools
author    Patrick Donnelly <pdonnell@redhat.com>
          Fri, 13 Dec 2019 00:43:14 +0000 (16:43 -0800)
committer Patrick Donnelly <pdonnell@redhat.com>
          Fri, 20 Dec 2019 20:53:10 +0000 (12:53 -0800)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_admin.py
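
In outline, the new tests drive roughly the following monitor command sequence, shown here as a sketch assembled from the diff below rather than as part of the commit. The base name "foo" is hypothetical, and the snippet assumes it runs inside a CephFSTestCase, so self.fs is the test's Filesystem helper.

# Sketch of the EC data pool setup that _setup_ec_pools() and
# test_add_data_pool_ec() below perform; pool/profile names are hypothetical.
mon = self.fs.mon_manager
mon.raw_cluster_cmd('osd', 'pool', 'create', 'foo-meta', '8')
mon.raw_cluster_cmd('osd', 'erasure-code-profile', 'set', 'foo-profile',
                    'm=2', 'k=2', 'crush-failure-domain=osd')
mon.raw_cluster_cmd('osd', 'pool', 'create', 'foo-data', '8', 'erasure', 'foo-profile')
mon.raw_cluster_cmd('osd', 'pool', 'set', 'foo-data', 'allow_ec_overwrites', 'true')
self.fs.add_data_pool('foo-data', create=False)  # register the existing EC pool with the file system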

index 5c778231f0b0ba28624995b10e9bced8d25ce312..96f770a35db4f0f8e4a9f2bd612ba623daa71a6e 100644 (file)
@@ -25,6 +25,30 @@ log = logging.getLogger(__name__)
 DAEMON_WAIT_TIMEOUT = 120
 ROOT_INO = 1
 
+class FileLayout(object):
+    def __init__(self, pool=None, pool_namespace=None, stripe_unit=None, stripe_count=None, object_size=None):
+        self.pool = pool
+        self.pool_namespace = pool_namespace
+        self.stripe_unit = stripe_unit
+        self.stripe_count = stripe_count
+        self.object_size = object_size
+
+    @classmethod
+    def load_from_ceph(cls, layout_str):
+        # TODO: build a FileLayout from the layout string reported by Ceph
+        pass
+
+    def items(self):
+        if self.pool is not None:
+            yield ("pool", self.pool)
+        if self.pool_namespace:
+            yield ("pool_namespace", self.pool_namespace)
+        if self.stripe_unit is not None:
+            yield ("stripe_unit", self.stripe_unit)
+        if self.stripe_count is not None:
+            yield ("stripe_count", self.stripe_count)
+        if self.object_size is not None:
+            yield ("object_size", self.stripe_size)
 
 class ObjectNotFound(Exception):
     def __init__(self, object_name):
@@ -630,8 +654,13 @@ class Filesystem(MDSCluster):
     def get_var(self, var, status=None):
         return self.get_mds_map(status=status)[var]
 
-    def add_data_pool(self, name):
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
+    def set_dir_layout(self, mount, path, layout):
+        for name, value in layout.items():
+            mount.run_shell(args=["setfattr", "-n", "ceph.dir.layout."+name, "-v", str(value), path])
+
+    def add_data_pool(self, name, create=True):
+        if create:
+            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, str(self.pgs_per_fs_pool))
         self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh = True)
         for poolid, fs_name in self.data_pools.items():
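
For orientation, here is a minimal sketch, not part of the commit, of how FileLayout.items() drives set_dir_layout(): every field that was given a value becomes one ceph.dir.layout.* virtual xattr written with setfattr on the target directory. The pool name, layout values, and directory are made-up examples, and the import assumes the qa tree (with its teuthology dependencies) is on PYTHONPATH.

from tasks.cephfs.filesystem import FileLayout  # assumes ceph's qa/ tree is importable

# Hypothetical layout: only fields that were set are emitted by items().
layout = FileLayout(pool="cephfs_data_ec", stripe_count=2, object_size=4194304)
for name, value in layout.items():
    # set_dir_layout() turns each pair into one setfattr call on the directory
    print("setfattr -n ceph.dir.layout.%s -v %s subdir" % (name, value))

# Prints:
#   setfattr -n ceph.dir.layout.pool -v cephfs_data_ec subdir
#   setfattr -n ceph.dir.layout.stripe_count -v 2 subdir
#   setfattr -n ceph.dir.layout.object_size -v 4194304 subdir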
index 6459ab77e548f75068e045317f4b1cae1a4ac509..374f75a72cf0c3a9c57a05054ed134ea848056d9 100644 (file)
@@ -1,7 +1,10 @@
+from teuthology.orchestra.run import CommandFailedError
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from tasks.cephfs.fuse_mount import FuseMount
 
+from tasks.cephfs.filesystem import FileLayout
+
 class TestAdminCommands(CephFSTestCase):
     """
     Tests for administration command.
@@ -18,6 +21,97 @@ class TestAdminCommands(CephFSTestCase):
         s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
         self.assertTrue("active" in s)
 
+    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
+        if metadata:
+            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
+        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
+        self.fs.mon_manager.raw_cluster_cmd(*cmd)
+        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+        if overwrites:
+            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+
+    def test_add_data_pool_root(self):
+        """
+        That a new data pool can be added and used for the root directory.
+        """
+
+        p = self.fs.add_data_pool("foo")
+        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))
+
+    def test_add_data_pool_subdir(self):
+        """
+        That a new data pool can be added and used for a sub-directory.
+        """
+
+        p = self.fs.add_data_pool("foo")
+        self.mount_a.run_shell("mkdir subdir")
+        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))
+
+    def test_add_data_pool_ec(self):
+        """
+        That a new EC data pool can be added.
+        """
+
+        n = "test_add_data_pool_ec"
+        self._setup_ec_pools(n, metadata=False)
+        p = self.fs.add_data_pool(n+"-data", create=False)
+
+    def test_new_default_ec(self):
+        """
+        That a new file system warns/fails with an EC default data pool.
+        """
+
+        self.fs.delete_all_filesystems()
+        n = "test_new_default_ec"
+        self._setup_ec_pools(n)
+        try:
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+        except CommandFailedError as e:
+            if e.exitstatus == 22:
+                pass
+            else:
+                raise
+        else:
+            raise RuntimeError("expected failure")
+
+    def test_new_default_ec_force(self):
+        """
+        That a new file system succeeds with an EC default data pool with --force.
+        """
+
+        self.fs.delete_all_filesystems()
+        n = "test_new_default_ec_force"
+        self._setup_ec_pools(n)
+        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+
+    def test_new_default_ec_no_overwrite(self):
+        """
+        That a new file system fails with an EC default data pool without overwrite.
+        """
+
+        self.fs.delete_all_filesystems()
+        n = "test_new_default_ec_no_overwrite"
+        self._setup_ec_pools(n, overwrites=False)
+        try:
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+        except CommandFailedError as e:
+            if e.exitstatus == 22:
+                pass
+            else:
+                raise
+        else:
+            raise RuntimeError("expected failure")
+        # and even with --force !
+        try:
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+        except CommandFailedError as e:
+            if e.exitstatus == 22:
+                pass
+            else:
+                raise
+        else:
+            raise RuntimeError("expected failure")
+
 class TestConfigCommands(CephFSTestCase):
     """
     Test that daemons and clients respond to the otherwise rarely-used