qa: remove redundant and broken test (61562/head)
author Patrick Donnelly <pdonnell@ibm.com>
Wed, 5 Feb 2025 18:12:10 +0000 (13:12 -0500)
committer Patrick Donnelly <pdonnell@ibm.com>
Wed, 5 Feb 2025 18:13:09 +0000 (13:13 -0500)
Scrub does not repair damaged dirfrags for any of the damage types for which we
currently mark dirfrags damaged (a corrupt fnode or a missing dirfrag object).

In any case, this scenario is covered in cephfs_data_scan, with correct checks
for damage detection and handling (see the sketch below).

Fixes: 7f0cf0b7a2d94dd2189de4bef5865b024f3c7d4b
Signed-off-by: Patrick Donnelly <pdonnell@ibm.com>
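
The recovery flow the message refers to is the offline cephfs-data-scan
sequence exercised by the cephfs_data_scan task. Below is a minimal sketch of
that sequence, written against the same Filesystem test helpers the removed
test uses (self.fs.fail, self.fs.data_scan, and so on, inside a CephFSTestCase
method); it is an illustration, not code from this commit.

    # Offline recovery sketch: the MDS ranks must be down before running
    # the cephfs-data-scan tools against the pools.
    self.fs.fail()

    # Pass 1: scan data pool objects to recover file size/mtime metadata.
    self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
    # Pass 2: re-inject inodes and dentries into the metadata pool,
    # recreating missing dirfrag objects along the way.
    self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
    # Pass 3: repair hard link counts and resolve duplicate inodes.
    self.fs.data_scan(["scan_links"])

    # Bring the MDS back and wait for the filesystem to become healthy.
    self.fs.set_joinable()
    self.fs.wait_for_daemons()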
qa/tasks/cephfs/test_forward_scrub.py

index a18839f76ae7e7aa3232727d4f382a56c97b8797..615fc8894bff1a3781c86aaa3a2e10c5f42af40f 100644 (file)
@@ -390,87 +390,6 @@ class TestForwardScrub(CephFSTestCase):
         # Clean up the omap object
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
 
-    def test_health_status_after_dirfrag_repair(self):
-        """
-        Test that the damage health status is cleared
-        after the damaged dirfrag is repaired
-        """
-        self.mount_a.run_shell(["mkdir", "dir"])
-        self.mount_a.run_shell(["touch", "dir/file"])
-        self.mount_a.run_shell(["mkdir", "testdir"])
-        self.mount_a.run_shell(["ln", "dir/file", "testdir/hardlink"])
-
-        dir_ino = self.mount_a.path_to_ino("dir")
-
-        # Ensure everything is written to backing store
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-
-        # Drop everything from the MDS cache
-        self.fs.fail()
-
-        self.fs.radosm(["rm", "{0:x}.00000000".format(dir_ino)])
-
-        self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        # Check that touching the hardlink gives EIO
-        ran = self.mount_a.run_shell(["stat", "testdir/hardlink"], wait=False)
-        try:
-            ran.wait()
-        except CommandFailedError:
-            self.assertTrue("Input/output error" in ran.stderr.getvalue())
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that an entry is created in the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 3)
-        damage_types = set()
-        for i in range(0, 3):
-            damage_types.add(damage[i]['damage_type'])
-        self.assertIn("dir_frag", damage_types)
-        self.wait_until_true(lambda: self._is_MDS_damage(), timeout=100)
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that the entry is cleared from the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 1)
-        self.assertNotEqual(damage[0]['damage_type'], "dir_frag")
-
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-        self.fs.fail()
-
-        # Run cephfs-data-scan
-        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_links"])
-
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 0)
-        self.wait_until_true(lambda: not self._is_MDS_damage(), timeout=100)
-
     def test_health_status_after_backtrace_repair(self):
         """
         Test that the damage health status is cleared