From 623f1240a26b3e7237bdcc6a544395e69758d788 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Fri, 3 Mar 2017 10:16:52 +0000
Subject: [PATCH] tasks/cephfs: add TestStrays.test_replicated_delete_speed

Reproducer for http://tracker.ceph.com/issues/16914

Signed-off-by: John Spray
---
 qa/tasks/cephfs/test_strays.py | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 3335d89dd90..87a04d73ecf 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -4,6 +4,8 @@ import logging
 from textwrap import dedent
 import datetime
 import gevent
 
+import datetime
+
 from teuthology.orchestra.run import CommandFailedError, Raw
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
@@ -1011,3 +1013,33 @@ class TestStrays(CephFSTestCase):
         self.fs.wait_for_daemons()
         time.sleep(10)
         self.assertEqual(self.get_stat("purge_queue", "pq_executed"), 0)
+
+    def test_replicated_delete_speed(self):
+        """
+        That deletions of replicated metadata are not pathologically slow
+        """
+        rank_0_id, rank_1_id = self._setup_two_ranks()
+
+        self.set_conf("mds.{0}".format(rank_1_id), 'mds_max_purge_files', "0")
+        self.mds_cluster.mds_fail_restart(rank_1_id)
+        self.fs.wait_for_daemons()
+
+        file_count = 10
+
+        self.mount_a.create_n_files("delete_me/file", file_count)
+
+        self._force_migrate(rank_1_id, "delete_me",
+                            self.mount_a.path_to_ino("delete_me/file_0"))
+
+        begin = datetime.datetime.now()
+        self.mount_a.run_shell(["rm", "-rf", Raw("delete_me/*")])
+        end = datetime.datetime.now()
+
+        # What we're really checking here is that we are completing client
+        # operations immediately rather than delaying until the next tick.
+        tick_period = float(self.fs.get_config("mds_tick_interval",
+                                               service_type="mds"))
+
+        duration = (end - begin).total_seconds()
+        self.assertLess(duration, (file_count * tick_period) * 0.25)
+
-- 
2.39.5
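
For readers outside the CephFS test framework, a minimal standalone sketch of the timing pattern the new test uses follows. The names here (timed_bulk_delete, the local temporary directory, the tick_period=5.0 default) are illustrative stand-ins, not part of the patch: the real test times rm -rf over a CephFS mount and derives its threshold from the MDS option mds_tick_interval.

import datetime
import os
import shutil
import tempfile


def timed_bulk_delete(file_count=10, tick_period=5.0):
    """Create file_count files, delete them all, and check the deletion
    completes in a small fraction of file_count * tick_period.

    Purely illustrative: in the real test the files live on a CephFS
    mount and tick_period is read from mds_tick_interval."""
    workdir = tempfile.mkdtemp(prefix="delete_me_")
    for i in range(file_count):
        with open(os.path.join(workdir, "file_{0}".format(i)), "w") as f:
            f.write("x")

    begin = datetime.datetime.now()
    shutil.rmtree(workdir)          # stands in for: rm -rf delete_me/*
    end = datetime.datetime.now()

    duration = (end - begin).total_seconds()
    # If every unlink had to wait for the next tick, deleting the files
    # would take on the order of file_count * tick_period seconds, so a
    # quarter of that bound is a generous ceiling for the healthy case.
    limit = (file_count * tick_period) * 0.25
    assert duration < limit, "deletion took %.2fs (limit %.2fs)" % (duration, limit)
    return duration


if __name__ == "__main__":
    print("deleted in %.4fs" % timed_bulk_delete())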
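
The 0.25 factor is the heart of the assertion: a tick-gated unlink path would need on the order of file_count * mds_tick_interval seconds to answer all ten deletions, so finishing the rm in under a quarter of that bound shows client operations on the replicated directory are completed immediately, as the in-test comment says. The test also sets mds_max_purge_files to 0 on the rank that receives delete_me, which stops that rank from purging the resulting strays while the deletions are being timed. For local runs, recent trees can usually drive this suite against a vstart cluster with qa/tasks/vstart_runner.py (selecting tasks.cephfs.test_strays), though the exact invocation varies by branch.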