From ff5475f0cc663f4bb3a57e7c63672ac894b93c39 Mon Sep 17 00:00:00 2001
From: Douglas Fuller
Date: Wed, 12 Jul 2017 10:41:11 -0500
Subject: [PATCH] qa/ceph_test_case: support CephFS recovery pools

Add support for testing recovery of CephFS metadata into an alternate
RADOS pool, useful as a disaster recovery mechanism that avoids
modifying the metadata in-place.

Signed-off-by: Douglas Fuller
(cherry picked from commit c85562c94a80b8a18975b8d0ee6a7fbd932cf024)
---
 qa/tasks/ceph_test_case.py          | 1 +
 qa/tasks/cephfs/cephfs_test_case.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 0f66106c9f1f0..5767df4611de9 100644
--- a/qa/tasks/ceph_test_case.py
+++ b/qa/tasks/ceph_test_case.py
@@ -18,6 +18,7 @@ class CephTestCase(unittest.TestCase):
     # Environment references
     mounts = None
     fs = None
+    recovery_fs = None
     ceph_cluster = None
     mds_cluster = None
     mgr_cluster = None
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 1181c80048fb2..d3c1154ca86aa 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -105,6 +105,7 @@ class CephFSTestCase(CephTestCase):
         self.mds_cluster.mds_fail()
         self.mds_cluster.delete_all_filesystems()
         self.fs = None # is now invalid!
+        self.recovery_fs = None
 
         # In case the previous filesystem had filled up the RADOS cluster, wait for that
         # flag to pass.
-- 
2.39.5