--- /dev/null
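+# Verify the mgr/volumes per-module finisher perf counters while running
+# the TestPerModuleFinisherThread test below.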
+tasks:
+  - check-counter:
+      counters:
+        mgr:
+          - name: "finisher-volumes.complete_latency.avgcount"
+            min: 4
+          - name: "finisher-volumes.queue_len"
+            expected_val: 0
+
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_volumes.TestPerModuleFinisherThread
--- /dev/null
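+# Exercise the enabled mgr modules via mgr/test_per_module_finisher.sh and
+# verify their per-module finisher perf counters with check-counter.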
+tasks:
+  - install:
+  - ceph:
+      wait-for-scrub: false
+  - check-counter:
+      counters:
+        mgr:
+          - name: "finisher-balancer.complete_latency.avgcount"
+            min: 1
+          - name: "finisher-balancer.queue_len"
+            expected_val: 0
+          - name: "finisher-crash.complete_latency.avgcount"
+            min: 2
+          - name: "finisher-crash.queue_len"
+            expected_val: 0
+          - name: "finisher-devicehealth.complete_latency.avgcount"
+            min: 1
+          - name: "finisher-devicehealth.queue_len"
+            expected_val: 0
+          - name: "finisher-iostat.complete_latency.avgcount"
+            min: 1
+          - name: "finisher-iostat.queue_len"
+            expected_val: 0
+          - name: "finisher-pg_autoscaler.complete_latency.avgcount"
+            min: 1
+          - name: "finisher-pg_autoscaler.queue_len"
+            expected_val: 0
+          - name: "finisher-progress.complete_latency.avgcount"
+            min: 2
+          - name: "finisher-progress.queue_len"
+            expected_val: 0
+          - name: "finisher-status.complete_latency.avgcount"
+            min: 2
+          - name: "finisher-status.queue_len"
+            expected_val: 0
+          - name: "finisher-telemetry.complete_latency.avgcount"
+            min: 1
+          - name: "finisher-telemetry.queue_len"
+            expected_val: 0
+  - workunit:
+      clients:
+        client.0:
+          - mgr/test_per_module_finisher.sh
- workunit:
clients:
client.0:
- - mgr
\ No newline at end of file
+ - mgr/test_localpool.sh
# remove group
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+class TestPerModuleFinisherThread(TestVolumesHelper):
+    """
+    Per-module finisher thread tests related to mgr/volume cmds.
+    This is used in conjunction with check_counter with min val being 4
+    as four subvolume cmds are run.
+    """
+    def test_volumes_module_finisher_thread(self):
+        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
+        group = self._generate_random_group_name()
+
+        # create group
+        self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+        # create subvolumes in group
+        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
+        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group)
+        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group)
+
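+        # remove the subvolumes and then the group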
+        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
+        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
+        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
+        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+        # verify trash dir is clean
+        self._wait_for_trash_empty()
from teuthology.task import Task
from teuthology import misc
+from tasks import ceph_manager
+
log = logging.getLogger(__name__)
min: 3
- workunit: ...
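+
+    A counter may be given as a plain name (its value must then be >= 1), or
+    as a dict with "name" and either "min" (observed value must be >= min) or
+    "expected_val" (observed value must equal it exactly), e.g.::
+
+        - name: "finisher-volumes.queue_len"
+          expected_val: 0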
"""
+    @property
+    def admin_remote(self):
+        first_mon = misc.get_first_mon(self.ctx, None)
+        (result,) = self.ctx.cluster.only(first_mon).remotes.keys()
+        return result
def start(self):
log.info("START")
if cluster_name is None:
cluster_name = next(iter(self.ctx.managers.keys()))
+
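+        # the per-module finisher counters are reported only by the active
+        # mgr, so find out which mgr daemon is currently active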
+        mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=self.ctx, logger=log.getChild('ceph_manager'))
+        active_mgr = json.loads(mon_manager.raw_cluster_cmd("mgr", "dump", "--format=json-pretty"))["active_name"]
+
for daemon_type, counters in targets.items():
# List of 'a', 'b', 'c'...
daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type))
if not daemon.running():
log.info("Ignoring daemon {0}, it isn't running".format(daemon_id))
continue
+                elif daemon_type == 'mgr' and daemon_id != active_mgr:
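+                    # only the active mgr runs the modules; skip standby mgrs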
+                    continue
else:
log.debug("Getting stats from {0}".format(daemon_id))
log.warning("No admin socket response from {0}, skipping".format(daemon_id))
continue
                 for counter in counters:
+                    # reset the per-counter thresholds so a stale value from
+                    # the previous counter spec cannot satisfy this one
+                    minval = ''
+                    expected_val = ''
                     if isinstance(counter, dict):
                         name = counter['name']
-                        minval = counter['min']
+                        if 'min' in counter:
+                            minval = counter['min']
+                        if 'expected_val' in counter:
+                            expected_val = counter['expected_val']
                     else:
                         name = counter
                         minval = 1
                     if val is not None:
                         log.info(f"Daemon {daemon_type}.{daemon_id} {name}={val}")
-                        if val >= minval:
+                        if isinstance(minval, int) and val >= minval:
+                            seen.add(name)
+                        elif isinstance(expected_val, int) and val == expected_val:
                             seen.add(name)
if not dry_run:
--- /dev/null
+#!/usr/bin/env bash
+set -ex
+
+# This testcase exercises the per-module finisher stats of the enabled mgr
+# modules; the counters are verified by the check-counter task
+# (qa/tasks/check_counter.py).
+
+# 'balancer' commands
+ceph balancer pool ls
+
+# 'crash' commands
+ceph crash ls
+ceph crash ls-new
+
+# 'device' commands
+ceph device query-daemon-health-metrics mon.a
+
+# 'iostat' command
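+# iostat runs continuously, so start it in the background and stop it after
+# a few seconds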
+ceph iostat &
+pid=$!
+sleep 3
+kill -SIGTERM $pid
+
+# 'pg_autoscaler' command
+ceph osd pool autoscale-status
+
+# 'progress' command
+ceph progress
+ceph progress json
+
+# 'status' commands
+ceph fs status
+ceph osd status
+
+# 'telemetry' commands
+ceph telemetry status
+
+echo OK