tasks/cephfs: add test_backtrace
author    John Spray <jspray@redhat.com>
          Thu, 25 Jun 2015 00:38:38 +0000 (01:38 +0100)
committer John Spray <jspray@redhat.com>
          Thu, 25 Jun 2015 16:19:03 +0000 (17:19 +0100)
This is for verifying the new layout-writing behaviour.  While
we're at it, test that the pre-existing backtrace behaviours
are really happening (updating old_pools).

Signed-off-by: John Spray <john.spray@redhat.com>
tasks/cephfs/filesystem.py
tasks/cephfs/test_backtrace.py [new file with mode: 0644]

diff --git a/tasks/cephfs/filesystem.py b/tasks/cephfs/filesystem.py
index bf3a739e8ad63f35ec34d53d0372985c7aa035f0..d72e276d45be6019f71f8a4e9c1cb3a58327c63d 100644 (file)
--- a/tasks/cephfs/filesystem.py
+++ b/tasks/cephfs/filesystem.py
@@ -59,10 +59,19 @@ class Filesystem(object):
         self.client_id = client_list[0]
         self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
 
-    def create(self):
+    def get_pgs_per_fs_pool(self):
+        """
+        Calculate how many PGs to use when creating a pool, in order to avoid raising any
+        health warnings about mon_pg_warn_min_per_osd
+
+        :return: an integer number of PGs
+        """
         pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
         osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-        pgs_per_fs_pool = pg_warn_min_per_osd * osd_count
+        return pg_warn_min_per_osd * osd_count
+
+    def create(self):
+        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
 
         self.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'metadata', pgs_per_fs_pool.__str__()])
         self.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'data', pgs_per_fs_pool.__str__()])
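
As a concrete illustration of what get_pgs_per_fs_pool() computes, here is a
minimal sketch; the value 30 is an assumed default for mon_pg_warn_min_per_osd
and 3 an assumed test-cluster OSD count, both of which the real code reads at
runtime::

    # Size each pool so that the per-OSD PG count stays at or above
    # mon_pg_warn_min_per_osd, which would otherwise trigger a health warning.
    pg_warn_min_per_osd = 30   # assumed default; really get_config('mon_pg_warn_min_per_osd')
    osd_count = 3              # assumed; really counted from the cluster's osd roles
    pgs_per_fs_pool = pg_warn_min_per_osd * osd_count   # -> 90
    # create() then runs e.g.: ceph osd pool create metadata 90
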
@@ -479,34 +488,18 @@ class Filesystem(object):
                 time.sleep(1)
                 elapsed += 1
 
-    def read_backtrace(self, ino_no):
-        """
-        Read the backtrace from the data pool, return a dict in the format
-        given by inode_backtrace_t::dump, which is something like:
-
-        ::
-
-            rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin
-            ceph-dencoder type inode_backtrace_t import out.bin decode dump_json
-
-            { "ino": 1099511627778,
-              "ancestors": [
-                    { "dirino": 1,
-                      "dname": "blah",
-                      "version": 11}],
-              "pool": 1,
-              "old_pools": []}
-
-        """
+    def _read_data_xattr(self, ino_no, xattr_name, type, pool):
         mds_id = self.mds_ids[0]
         remote = self.mds_daemons[mds_id].remote
+        if pool is None:
+            pool = self.get_data_pool_name()
 
         obj_name = "{0:x}.00000000".format(ino_no)
 
         temp_file = "/tmp/{0}_{1}".format(obj_name, datetime.datetime.now().isoformat())
 
         args = [
-            "rados", "-p", self.get_data_pool_name(), "getxattr", obj_name, "parent",
+            "rados", "-p", pool, "getxattr", obj_name, xattr_name,
             run.Raw(">"), temp_file
         ]
         try:
@@ -518,12 +511,51 @@ class Filesystem(object):
             raise ObjectNotFound(obj_name)
 
         p = remote.run(
-            args=["ceph-dencoder", "type", "inode_backtrace_t", "import", temp_file, "decode", "dump_json"],
+            args=["ceph-dencoder", "type", type, "import", temp_file, "decode", "dump_json"],
             stdout=StringIO()
         )
 
         return json.loads(p.stdout.getvalue().strip())
 
+    def read_backtrace(self, ino_no, pool=None):
+        """
+        Read the backtrace from the data pool, return a dict in the format
+        given by inode_backtrace_t::dump, which is something like:
+
+        ::
+
+            rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin
+            ceph-dencoder type inode_backtrace_t import out.bin decode dump_json
+
+            { "ino": 1099511627778,
+              "ancestors": [
+                    { "dirino": 1,
+                      "dname": "blah",
+                      "version": 11}],
+              "pool": 1,
+              "old_pools": []}
+
+        :param pool: name of pool to read backtrace from.  If omitted, FS must have only
+                     one data pool and that will be used.
+        """
+        return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool)
+
+    def read_layout(self, ino_no, pool=None):
+        """
+        Read the 'layout' xattr of an inode and parse the result, returning a dict like:
+
+        ::
+
+            {
+                "stripe_unit": 4194304,
+                "stripe_count": 1,
+                "object_size": 4194304,
+                "pg_pool": 1
+            }
+
+        :param pool: name of pool to read layout from.  If omitted, FS must have only
+                     one data pool and that will be used.
+        """
+        return self._read_data_xattr(ino_no, "layout", "ceph_file_layout_wrapper", pool)
+
     def _enumerate_data_objects(self, ino, size):
         """
         Get the list of expected data objects for a range, and the list of objects
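
The refactor leaves read_backtrace() and read_layout() as thin wrappers around a
single fetch-and-decode path.  A minimal usage sketch, assuming the
CephFSTestCase environment the new test below runs in (self.fs is a Filesystem
instance and file_ino an inode number, as in the test that follows)::

    # Both xattrs are only guaranteed to be on disk after a journal flush.
    self.fs.mds_asok(["flush", "journal"])
    backtrace = self.fs.read_backtrace(file_ino)   # decodes inode_backtrace_t
    layout = self.fs.read_layout(file_ino)         # decodes ceph_file_layout_wrapper
    # Both records identify the pool currently holding the file's data:
    assert backtrace["pool"] == layout["pg_pool"]
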
diff --git a/tasks/cephfs/test_backtrace.py b/tasks/cephfs/test_backtrace.py
new file mode 100644 (file)
index 0000000..4cdc1b6
--- /dev/null
+++ b/tasks/cephfs/test_backtrace.py
@@ -0,0 +1,80 @@
+
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+
+class TestBacktrace(CephFSTestCase):
+    def test_backtrace(self):
+        """
+        That the 'parent' and 'layout' xattrs on the head objects of files
+        are updated correctly.
+        """
+
+        def get_pool_id(name):
+            return self.fs.mon_manager.get_pool_dump(name)['pool']
+
+        old_data_pool_name = self.fs.get_data_pool_name()
+        old_pool_id = get_pool_id(old_data_pool_name)
+
+        # Create a file for subsequent checks
+        self.mount_a.run_shell(["mkdir", "parent_a"])
+        self.mount_a.run_shell(["touch", "parent_a/alpha"])
+        file_ino = self.mount_a.path_to_ino("parent_a/alpha")
+
+        # That backtrace and layout are written after initial flush
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_a'], [a['dname'] for a in backtrace['ancestors']])
+        layout = self.fs.read_layout(file_ino)
+        self.assertDictEqual(layout, {
+            "stripe_unit": 4194304,
+            "stripe_count": 1,
+            "object_size": 4194304,
+            "pg_pool": old_pool_id
+        })
+        self.assertEqual(backtrace['pool'], old_pool_id)
+
+        # That backtrace is written after parentage changes
+        self.mount_a.run_shell(["mkdir", "parent_b"])
+        self.mount_a.run_shell(["mv", "parent_a/alpha", "parent_b/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace['ancestors']])
+
+        # Create a new data pool
+        new_pool_name = "data_new"
+        self.fs.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', new_pool_name,
+                                       self.fs.get_pgs_per_fs_pool().__str__()])
+        self.fs.admin_remote.run(args=['sudo', 'ceph', 'mds', 'add_data_pool', new_pool_name])
+        new_pool_id = get_pool_id(new_pool_name)
+
+        # That an object which has switched pools gets its backtrace updated
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.pool", "-v", new_pool_name, "./parent_b/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(backtrace_old_pool['pool'], new_pool_id)
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(backtrace_new_pool['pool'], new_pool_id)
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['pg_pool'], new_pool_id)
+
+        # That subsequent linkage changes are only written to new pool backtrace
+        self.mount_a.run_shell(["mkdir", "parent_c"])
+        self.mount_a.run_shell(["mv", "parent_b/alpha", "parent_c/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace_old_pool['ancestors']])
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(['alpha', 'parent_c'], [a['dname'] for a in backtrace_new_pool['ancestors']])
+
+        # That layout is written to new pool after change to other field in layout
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.object_size", "-v", "8388608", "./parent_c/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['object_size'], 8388608)
+
+        # ...but not to the old pool: the old pool's backtrace points to the new pool,
+        # and that's enough; we don't update the layout in all the old pools whenever it changes.
+        old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name)
+        self.assertEqual(old_pool_layout['object_size'], 4194304)
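
To make explicit the end state that the last assertions pin down, a summary
sketch (values restated from the test above, variables as defined there)::

    # After the pool switch and the object_size change:
    #   old pool copy: layout frozen at its original value; backtrace stale,
    #                  but its 'pool' field forwards readers to the live pool.
    #   new pool copy: layout and backtrace both kept current.
    assert old_pool_layout["object_size"] == 4194304     # never refreshed
    assert backtrace_old_pool["pool"] == new_pool_id     # the forwarding pointer
    assert new_pool_layout["object_size"] == 8388608     # tracks the change
    assert [a["dname"] for a in backtrace_new_pool["ancestors"]] == ["alpha", "parent_c"]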