From 13eb6c6ce9e6c8f5a4a8bcdc1bba64bb92393cf1 Mon Sep 17 00:00:00 2001
From: sajibreadd
Date: Thu, 24 Apr 2025 15:24:33 +0200
Subject: [PATCH] qa: add a test to verify that a damaged hard link is
 detected during scrub

Also update the ignorelist with the expected cluster warning.

Fixes: https://tracker.ceph.com/issues/69345
Signed-off-by: Md Mahamudur Rahaman Sajib
---
 qa/suites/fs/functional/tasks/scrub.yaml |  1 +
 qa/tasks/cephfs/test_scrub_checks.py     | 47 ++++++++++++++++++++++--
 2 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/qa/suites/fs/functional/tasks/scrub.yaml b/qa/suites/fs/functional/tasks/scrub.yaml
index 09e6668493b01..dfebef6451408 100644
--- a/qa/suites/fs/functional/tasks/scrub.yaml
+++ b/qa/suites/fs/functional/tasks/scrub.yaml
@@ -8,6 +8,7 @@ overrides:
       - bad backtrace on inode
       - overall HEALTH_
       - \(MDS_TRIM\)
+      - object missing on disk
     conf:
       mds:
         mds log max segments: 1
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index 60646bb225028..56125347163f7 100644
--- a/qa/tasks/cephfs/test_scrub_checks.py
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -477,6 +477,44 @@ class TestScrubChecks(CephFSTestCase):
             timeout=30
         )
 
+    def test_scrub_remote_link(self):
+        """
+        Verify scrub reports damage for a hard link whose objects are missing
+        """
+        test_dir_path = "test_dir"
+        self.mount_a.run_shell(["mkdir", test_dir_path])
+        file_dir_path = os.path.join(test_dir_path, "file_dir")
+        self.mount_a.run_shell(["mkdir", file_dir_path])
+        file_path = os.path.join(file_dir_path, "test_file.txt")
+        link_path = os.path.join(test_dir_path, "test_link")
+        abs_link_path = "/" + link_path
+        self.mount_a.run_shell(["touch", file_path])
+        self.mount_a.run_shell(["ln", file_path, link_path])
+        file_ino = self.mount_a.path_to_ino(file_path)
+        dir_ino = self.mount_a.path_to_ino(file_dir_path)
+        rados_obj_file = "{ino:x}.00000000".format(ino=file_ino)
+        rados_obj_dir = "{ino:x}.00000000".format(ino=dir_ino)
+        self.fs.flush()
+        self.fs.rados(["rm", rados_obj_file], pool=self.fs.get_data_pool_name())
+        self.fs.rados(["rm", rados_obj_dir], pool=self.fs.get_metadata_pool_name())
+        self.fs.mds_fail_restart()
+        self.fs.wait_for_daemons()
+        status = self.fs.mds_asok(['status'])
+        self.assertEqual("up:active", str(status['state']))
+        mds_rank = self.fs.get_rank()['rank']
+        success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0)
+        scrub_json = self.tell_command(mds_rank,
+                                       "scrub start /{0} recursive force".format(test_dir_path),
+                                       success_validator)
+        self.assertEqual(
+            self.fs.wait_until_scrub_complete(tag=scrub_json["scrub_tag"]), True)
+        damage_json = self.tell_command(mds_rank, "damage ls")
+        found_remote_link_damage = False
+        for entry in damage_json:
+            if entry["path"] == abs_link_path:
+                found_remote_link_damage = True
+        self.assertEqual(found_remote_link_damage, True)
+
     def test_stray_evaluation_with_scrub(self):
         """
         test that scrub can iterate over ~mdsdir and evaluate strays
@@ -501,7 +539,7 @@ class TestScrubChecks(CephFSTestCase):
                 jv=element_value, ev=expected_value)
         return True, "Succeeded"
 
-    def tell_command(self, mds_rank, command, validator):
+    def tell_command(self, mds_rank, command, validator=None):
        log.info("Running command '{command}'".format(command=command))
 
        command_list = command.split()
@@ -510,9 +548,10 @@ class TestScrubChecks(CephFSTestCase):
        log.info("command '{command}' returned '{jout}'".format(
            command=command, jout=jout))
 
-        success, errstring = validator(jout, 0)
-        if not success:
-            raise AsokCommandFailedError(command, 0, jout, errstring)
+        if validator:
+            success, errstring = validator(jout, 0)
+            if not success:
+                raise AsokCommandFailedError(command, 0, jout, errstring)
        return jout
 
    @staticmethod
-- 
2.39.5
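
A minimal sketch of how the new case might be exercised against a local vstart
cluster with the qa vstart_runner harness; the runner path, working directory,
and environment setup are assumptions based on the usual qa/ layout and may
differ on your checkout:

    # Hypothetical invocation: run only the new test from the build directory
    # of a running vstart cluster (see the Ceph developer guide for full setup).
    python3 ../qa/tasks/vstart_runner.py \
        tasks.cephfs.test_scrub_checks.TestScrubChecks.test_scrub_remote_link

In a teuthology run the case is exercised via qa/suites/fs/functional/tasks/scrub.yaml,
which is why the patch adds the expected "object missing on disk" warning to that
file's ignorelist.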