qa: add a test to verify that a damaged hard link is detected during scrub
author sajibreadd <sajibreadd@gmail.com>
Thu, 24 Apr 2025 13:24:33 +0000 (15:24 +0200)
committer Venky Shankar <vshankar@redhat.com>
Fri, 26 Sep 2025 05:07:55 +0000 (10:37 +0530)
Also update ignorelist with the expected cluster warning.

Fixes: https://tracker.ceph.com/issues/69345
Signed-off-by: Md Mahamudur Rahaman Sajib <mahamudur.sajib@croit.io>
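
For reference, the scrub-and-inspect flow that the new test automates can also be driven by hand against a running cluster. A minimal sketch, assuming rank 0 holds the tree, `ceph` is on PATH, and the damaged link lives at /test_dir/test_link as in the test below; `tell_mds` is a hypothetical helper, not part of the qa framework:

    import json
    import subprocess

    def tell_mds(rank, *args):
        # "ceph tell mds.<rank> ..." replies with JSON for these commands.
        out = subprocess.check_output(["ceph", "tell", "mds.%d" % rank, *args])
        return json.loads(out)

    scrub = tell_mds(0, "scrub", "start", "/test_dir", "recursive", "force")
    # Once the scrub tagged scrub["scrub_tag"] completes, the damaged
    # hard link should appear in the MDS damage table:
    damage = tell_mds(0, "damage", "ls")
    print(any(entry.get("path") == "/test_dir/test_link" for entry in damage))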
qa/suites/fs/functional/tasks/scrub.yaml
qa/tasks/cephfs/test_scrub_checks.py

index 09e6668493b01da5fee82b4739c0451dfe945938..dfebef6451408b405ecd1ad2506a8c69d74e73dd 100644 (file)
@@ -8,6 +8,7 @@ overrides:
       - bad backtrace on inode
       - overall HEALTH_
       - \(MDS_TRIM\)
+      - object missing on disk
     conf:
       mds:
         mds log max segments: 1
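
The log-ignorelist entries are applied as regular expressions against cluster log lines, which is why the parentheses in the existing \(MDS_TRIM\) entry are escaped while the new entry is a plain substring. A rough sketch of that matching, with a made-up log line and a hypothetical helper for illustration:

    import re

    ignorelist = [r"bad backtrace on inode", r"overall HEALTH_",
                  r"\(MDS_TRIM\)", r"object missing on disk"]

    def is_expected(line):
        # A cluster log line is tolerated if any ignorelist pattern matches it.
        return any(re.search(pattern, line) for pattern in ignorelist)

    print(is_expected("log [ERR] : object missing on disk"))  # True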
index 60646bb22502842f3171a76ce8c2170b042f4732..56125347163f71290a1c4129804a0581a7a2cd07 100644 (file)
@@ -477,6 +477,44 @@ class TestScrubChecks(CephFSTestCase):
             timeout=30
         )
 
+    def test_scrub_remote_link(self):
+        """
+        Test that scrub detects damage on a hard (remote) link.
+        """
+        test_dir_path = "test_dir"
+        self.mount_a.run_shell(["mkdir", test_dir_path])
+        file_dir_path = os.path.join(test_dir_path, "file_dir")
+        self.mount_a.run_shell(["mkdir", file_dir_path])
+        file_path = os.path.join(file_dir_path, "test_file.txt")
+        link_path = os.path.join(test_dir_path, "test_link")
+        abs_link_path = "/" + link_path
+        self.mount_a.run_shell(["touch", file_path])
+        self.mount_a.run_shell(["ln", file_path, link_path])
+        file_ino = self.mount_a.path_to_ino(file_path)
+        dir_ino = self.mount_a.path_to_ino(file_dir_path)
+        rados_obj_file = "{ino:x}.00000000".format(ino=file_ino)
+        rados_obj_dir = "{ino:x}.00000000".format(ino=dir_ino)
+        self.fs.flush()
+        self.fs.rados(["rm", rados_obj_file], pool=self.fs.get_data_pool_name())
+        self.fs.rados(["rm", rados_obj_dir], pool=self.fs.get_metadata_pool_name())
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+        status = self.fs.mds_asok(['status'])
+        self.assertEqual("up:active", str(status['state']))
+        mds_rank = self.fs.get_rank()['rank']
+        success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0)
+        scrub_json = self.tell_command(
+            mds_rank, "scrub start /{0} recursive force".format(test_dir_path),
+            success_validator)
+        self.assertTrue(
+            self.fs.wait_until_scrub_complete(tag=scrub_json["scrub_tag"]))
+        damage_json = self.tell_command(mds_rank, "damage ls")
+        found_remote_link_damage = False
+        for entry in damage_json:
+            if entry["path"] == abs_link_path :
+                found_remote_link_damage = True
+        self.assertEqual(found_remote_link_damage, True)
+
     def test_stray_evaluation_with_scrub(self):
         """
         test that scrub can iterate over ~mdsdir and evaluate strays
@@ -501,7 +539,7 @@ class TestScrubChecks(CephFSTestCase):
                 jv=element_value, ev=expected_value)
         return True, "Succeeded"
 
-    def tell_command(self, mds_rank, command, validator):
+    def tell_command(self, mds_rank, command, validator=None):
         log.info("Running command '{command}'".format(command=command))
 
         command_list = command.split()
@@ -510,9 +548,10 @@ class TestScrubChecks(CephFSTestCase):
         log.info("command '{command}' returned '{jout}'".format(
                      command=command, jout=jout))
 
-        success, errstring = validator(jout, 0)
-        if not success:
-            raise AsokCommandFailedError(command, 0, jout, errstring)
+        if validator:
+            success, errstring = validator(jout, 0)
+            if not success:
+                raise AsokCommandFailedError(command, 0, jout, errstring)
         return jout
 
     @staticmethod
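
With the validator parameter now optional, callers that only need the decoded JSON can drop it, which is exactly how the new test uses it. Illustrative calls, mirroring the test above:

    # Validated: raises AsokCommandFailedError unless return_code == 0.
    scrub_json = self.tell_command(
        mds_rank, "scrub start /test_dir recursive force",
        lambda j, r: self.json_validator(j, r, "return_code", 0))

    # Unvalidated: just return the output of "damage ls", parsed as JSON.
    damage_json = self.tell_command(mds_rank, "damage ls")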