From 18d5cae625249863db5fd1b6b59470b4632bc61d Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 5 Feb 2025 13:12:10 -0500
Subject: [PATCH] qa: remove redundant and broken test

Scrub does not fix damaged dirfrags for any type of damage we currently
mark dirfrags damaged for (corrupt fnode / missing dirfrag object). In
any case, this scenario is covered in cephfs_data_scan with correct
checks for damage / handling.

Fixes: 7f0cf0b7a2d94dd2189de4bef5865b024f3c7d4b
Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_forward_scrub.py | 81 ---------------------------
 1 file changed, 81 deletions(-)

diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index a18839f76ae7e..615fc8894bff1 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -390,87 +390,6 @@ class TestForwardScrub(CephFSTestCase):
         # Clean up the omap object
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
 
-    def test_health_status_after_dirfrag_repair(self):
-        """
-        Test that the damage health status is cleared
-        after the damaged dirfrag is repaired
-        """
-        self.mount_a.run_shell(["mkdir", "dir"])
-        self.mount_a.run_shell(["touch", "dir/file"])
-        self.mount_a.run_shell(["mkdir", "testdir"])
-        self.mount_a.run_shell(["ln", "dir/file", "testdir/hardlink"])
-
-        dir_ino = self.mount_a.path_to_ino("dir")
-
-        # Ensure everything is written to backing store
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-
-        # Drop everything from the MDS cache
-        self.fs.fail()
-
-        self.fs.radosm(["rm", "{0:x}.00000000".format(dir_ino)])
-
-        self.fs.journal_tool(['journal', 'reset', '--yes-i-really-really-mean-it'], 0)
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        # Check that touching the hardlink gives EIO
-        ran = self.mount_a.run_shell(["stat", "testdir/hardlink"], wait=False)
-        try:
-            ran.wait()
-        except CommandFailedError:
-            self.assertTrue("Input/output error" in ran.stderr.getvalue())
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that an entry is created in the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 3)
-        damage_types = set()
-        for i in range(0, 3):
-            damage_types.add(damage[i]['damage_type'])
-        self.assertIn("dir_frag", damage_types)
-        self.wait_until_true(lambda: self._is_MDS_damage(), timeout=100)
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-
-        # Check that the entry is cleared from the damage table
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 1)
-        self.assertNotEqual(damage[0]['damage_type'], "dir_frag")
-
-        self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"])
-        self.fs.fail()
-
-        # Run cephfs-data-scan
-        self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()])
-        self.fs.data_scan(["scan_links"])
-
-        self.fs.set_joinable()
-        self.fs.wait_for_daemons()
-        self.mount_a.mount_wait()
-
-        out_json = self.fs.run_scrub(["start", "/dir", "recursive,repair"])
-        self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
-        damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-                "damage", "ls", '--format=json-pretty'))
-        self.assertEqual(len(damage), 0)
-        self.wait_until_true(lambda: not self._is_MDS_damage(), timeout=100)
-
     def test_health_status_after_backtrace_repair(self):
         """
         Test that the damage health status is cleared
-- 
2.39.5