git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
tasks/cephfs: override mon_pg_warn_max_per_osd in TestVolumeClient
author John Spray <john.spray@redhat.com>
Thu, 7 Jan 2016 16:47:26 +0000 (16:47 +0000)
committer John Spray <john.spray@redhat.com>
Thu, 14 Jan 2016 22:55:29 +0000 (22:55 +0000)
Because the volume client cues its PG counts from the
mon pg warn max per osd setting, the teuthology template's
ultra-high 10000 setting would produce pools with absurdly
large numbers of PGs.  Force the setting down to the usual
sensible default (300) for this test.
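As a rough illustration of why (a hypothetical helper, not the actual
ceph_volume_client logic; the scaling factor is invented), any PG count
derived by scaling against the warn threshold explodes when the threshold
is 10000 but stays modest at 300:

    # Hypothetical sketch: derive a data pool's pg_num from the warn
    # threshold and the OSD count, in the spirit of the volume client.
    def pg_count_for_data_pool(num_osds, mon_pg_warn_max_per_osd, pg_num_min=8):
        # Stay an order of magnitude below the warning threshold so the
        # cluster never reports "too many PGs per OSD".
        return max(pg_num_min, (mon_pg_warn_max_per_osd * num_osds) // 10)

    # Teuthology template value vs. the override applied in this test:
    assert pg_count_for_data_pool(3, 10000) == 3000  # absurd for a tiny test cluster
    assert pg_count_for_data_pool(3, 300) == 90      # reasonable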

Also update vstart_runner to populate ctx.daemons so that the test
can restart daemons the same way on vstart as in full-blown teuthology.
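The pattern this enables is the one used in the test_volume_client.py hunk
below; a minimal sketch, here applied to the MDS role, which vstart_runner
now registers as LocalDaemon entries:

    # Inside a test method: restart every MDS via ctx.daemons; the same
    # call works whether the entries are teuthology daemon states or the
    # LocalDaemon objects vstart_runner now registers.
    for mds_daemon_state in self.ctx.daemons.iter_daemons_of_role('mds'):
        mds_daemon_state.restart()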

For branch wip-manila

Signed-off-by: John Spray <john.spray@redhat.com>
tasks/cephfs/test_volume_client.py
tasks/cephfs/vstart_runner.py

index bfc211747c01dd786bc73efec0b9b693bcca0439..e537488bd556e016a0f400bff02b3c957eee0ddf 100644 (file)
@@ -219,6 +219,15 @@ vc.disconnect()
         That data isolated shares get their own pool
         :return:
         """
+
+        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
+        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
+        # sane before using volume_client, to avoid creating pools with absurdly large
+        # numbers of PGs.
+        self.set_conf("global", "mon pg warn max per osd", "300")
+        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
+            mon_daemon_state.restart()
+
         self.mount_b.umount_wait()
         self._configure_vc_auth(self.mount_b, "manila")
 
index 3b90afb78de1b3753a0dd3dd7098502ae573f6da..5242c44a653ea84dec6a5a191eb6472f4569f0ee 100644 (file)
@@ -35,6 +35,7 @@ import errno
 from unittest import suite
 import unittest
 from teuthology.orchestra.run import Raw, quote
+from teuthology.orchestra.daemon import DaemonGroup
 
 import logging
 
@@ -570,23 +571,13 @@ class LocalFilesystem(Filesystem):
 
         self.admin_remote = LocalRemote()
 
-        # Hack: cheeky inspection of ceph.conf to see what MDSs exist
-        self.mds_ids = set()
-        for line in open("ceph.conf").readlines():
-            match = re.match("^\[mds\.(.+)\]$", line)
-            if match:
-                self.mds_ids.add(match.group(1))
-
+        self.mds_ids = ctx.daemons.daemons['mds'].keys()
         if not self.mds_ids:
             raise RuntimeError("No MDSs found in ceph.conf!")
 
-        self.mds_ids = list(self.mds_ids)
-
-        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
-
         self.mon_manager = LocalCephManager()
 
-        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
+        self.mds_daemons = ctx.daemons.daemons["mds"]
 
         self.client_remote = LocalRemote()
 
@@ -734,6 +725,19 @@ def exec_test():
                 'test_path': test_dir
             }
             self.cluster = LocalCluster()
+            self.daemons = DaemonGroup()
+
+            # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
+            # tests that want to look these up via ctx can do so.
+            # Inspect ceph.conf to see what roles exist
+            for conf_line in open("ceph.conf").readlines():
+                for svc_type in ["mon", "osd", "mds"]:
+                    if svc_type not in self.daemons.daemons:
+                        self.daemons.daemons[svc_type] = {}
+                    match = re.match("^\[{0}\.(.+)\]$".format(svc_type), conf_line)
+                    if match:
+                        svc_id = match.group(1)
+                        self.daemons.daemons[svc_type][svc_id] = LocalDaemon(svc_type, svc_id)
 
         def __del__(self):
             shutil.rmtree(self.teuthology_config['test_path'])