From 9f27bde0b9955219c9d483276e5e191e3a94d726 Mon Sep 17 00:00:00 2001
From: Venky Shankar
Date: Fri, 3 May 2024 07:16:42 -0400
Subject: [PATCH] qa/cephfs: add test to verify backtrace update failure on
 deleted data pool

Signed-off-by: Venky Shankar
---
 qa/tasks/cephfs/test_backtrace.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/qa/tasks/cephfs/test_backtrace.py b/qa/tasks/cephfs/test_backtrace.py
index 6b094569b7b..cd23c114bfb 100644
--- a/qa/tasks/cephfs/test_backtrace.py
+++ b/qa/tasks/cephfs/test_backtrace.py
@@ -100,3 +100,29 @@ class TestBacktrace(CephFSTestCase):
         # we don't update the layout in all the old pools whenever it changes
         old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name)
         self.assertEqual(old_pool_layout['object_size'], 4194304)
+
+    def test_backtrace_flush_on_deleted_data_pool(self):
+        """
+        That the MDS does not go read-only when handling backtrace update errors
+        while backtrace updates are batched and flushed to RADOS (during journal trim)
+        and some of the pools have been removed.
+        """
+        data_pool = self.fs.get_data_pool_name()
+        extra_data_pool_name_1 = data_pool + '_extra1'
+        self.fs.add_data_pool(extra_data_pool_name_1)
+
+        self.mount_a.run_shell(["mkdir", "dir_x"])
+        self.mount_a.setfattr("dir_x", "ceph.dir.layout.pool", extra_data_pool_name_1)
+        self.mount_a.run_shell(["touch", "dir_x/file_x"])
+        self.fs.flush()
+
+        extra_data_pool_name_2 = data_pool + '_extra2'
+        self.fs.add_data_pool(extra_data_pool_name_2)
+        self.mount_a.setfattr("dir_x/file_x", "ceph.file.layout.pool", extra_data_pool_name_2)
+        self.mount_a.run_shell(["setfattr", "-x", "ceph.dir.layout", "dir_x"])
+        self.run_ceph_cmd("fs", "rm_data_pool", self.fs.name, extra_data_pool_name_1)
+        self.fs.flush()
+
+        # quick test to check if the MDS has handled backtrace update failure
+        # on the deleted data pool without going read-only.
+        self.mount_a.run_shell(["mkdir", "dir_y"])
-- 
2.39.5
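
A possible follow-up, not part of the patch above: the final mkdir acts as an
implicit liveness check, but the read-only condition could also be asserted
explicitly from cluster health. The sketch below is a minimal illustration to
add to the same test class; it assumes a get_ceph_cmd_stdout() helper is
available on the test case (run_ceph_cmd is already used in the patch) and
that a read-only MDS surfaces as the MDS_READ_ONLY health check. Both the
helper name and the health check key are assumptions, not verified against
this tree.

    import json

    def assert_mds_not_readonly(self):
        # Hypothetical helper: fetch cluster health as JSON and assert that
        # the MDS read-only health check was never raised. The helper name
        # and the MDS_READ_ONLY key are assumptions about the QA framework.
        health = json.loads(
            self.get_ceph_cmd_stdout("health", "detail", "--format=json"))
        self.assertNotIn("MDS_READ_ONLY", health.get("checks", {}))

Calling self.assert_mds_not_readonly() right after the second self.fs.flush()
would make the failure mode explicit rather than relying on the follow-up
mkdir to hang or error out.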