qa/tasks/mgr: add tests for sqlite autocommit
author Patrick Donnelly <pdonnell@redhat.com>
Fri, 19 Apr 2024 03:35:05 +0000 (23:35 -0400)
committer Patrick Donnelly <pdonnell@ibm.com>
Tue, 25 Feb 2025 16:17:15 +0000 (11:17 -0500)
Test that autocommit is properly turned off and that commits via context
managers work as expected.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
(cherry picked from commit fb82b6d35a734cbf3c27e4dbc5e6eb2eceb30759)
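
For context, the behavior under test is the standard sqlite3 transaction
contract that the mgr's database connection is expected to honor: autocommit
disabled, with durability only through explicit commits such as the connection
context manager. A minimal sketch using the stock Python sqlite3 module (not
the mgr's libcephsqlite-backed connection; the Device table is just a stand-in
for the schema the tests check):

    import sqlite3

    # With legacy transaction control, DML opens an implicit transaction and
    # nothing is durable until a commit -- here via the connection context
    # manager, which commits on clean exit and rolls back on exception.
    conn = sqlite3.connect(":memory:", isolation_level="DEFERRED")
    conn.execute("CREATE TABLE Device (devid TEXT PRIMARY KEY)")
    with conn:
        conn.execute("INSERT INTO Device VALUES ('dev0')")
    print(conn.execute("SELECT count(*) FROM Device").fetchone()[0])  # 1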

qa/suites/rados/mgr/tasks/1-install.yaml
qa/suites/rados/mgr/tasks/4-units/devicehealth.yaml
qa/tasks/mgr/mgr_test_case.py
qa/tasks/mgr/test_devicehealth.py

diff --git a/qa/suites/rados/mgr/tasks/1-install.yaml b/qa/suites/rados/mgr/tasks/1-install.yaml
index 6c48c5275a05ca4a2c1a6662b491f783127f8b90..6dfb420822c75752d0db568bf85e505258ce832f 100644
--- a/qa/suites/rados/mgr/tasks/1-install.yaml
+++ b/qa/suites/rados/mgr/tasks/1-install.yaml
@@ -1,2 +1,7 @@
 tasks:
   - install:
+      extra_system_packages:
+        rpm:
+          - sqlite-devel
+        deb:
+          - sqlite3
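
The packages above provide the sqlite3 CLI that the new tests drive against
libcephsqlite. An illustrative standalone invocation (assumes a client.admin
keyring on the node, the same setup the test script below relies on; the
.tables command and the /dev/null redirect are only for this sketch):

    export CEPH_ARGS='--id admin --no-log-to-stderr'
    sqlite3 -cmd '.load libcephsqlite.so' \
            -cmd '.open file:///.mgr:devicehealth/main.db?vfs=ceph' \
            -cmd '.tables' </dev/null
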
diff --git a/qa/suites/rados/mgr/tasks/4-units/devicehealth.yaml b/qa/suites/rados/mgr/tasks/4-units/devicehealth.yaml
index 6f337d83c2017a073c8b0d188d5ef2fc2b04930d..7ffaeb0e0099a19217ffc71fb566f0c8d21faddc 100644
--- a/qa/suites/rados/mgr/tasks/4-units/devicehealth.yaml
+++ b/qa/suites/rados/mgr/tasks/4-units/devicehealth.yaml
@@ -10,6 +10,9 @@ overrides:
       mgr:
         debug ms: 1
         debug cephsqlite: 20
+      client:
+        debug ms: 1
+        debug cephsqlite: 20
 
 tasks:
   - cephfs_test_runner:
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
index aa5bc6e56a9fac8ffa48178af422d43a4b5704ee..97d55d22387086b0ddabccf3fddf214badc7f97f 100644
--- a/qa/tasks/mgr/mgr_test_case.py
+++ b/qa/tasks/mgr/mgr_test_case.py
@@ -35,6 +35,17 @@ class MgrCluster(CephCluster):
         else:
             self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)
 
+    def set_down(self, yes='true'):
+        self.mon_manager.raw_cluster_cmd('mgr', 'set', 'down', str(yes))
+
+    def mgr_tell(self, *args, mgr_id=None, mgr_map=None):
+        if mgr_id is None:
+            if mgr_map is None:
+                mgr_map = self.get_mgr_map()
+            mgr_id = self.get_active_id(mgr_map=mgr_map)
+        J = self.mon_manager.raw_cluster_cmd("tell", f"mgr.{mgr_id}", *args)
+        return json.loads(J)
+
     def mgr_restart(self, mgr_id):
         self.mgr_daemons[mgr_id].restart()
 
@@ -50,11 +61,20 @@ class MgrCluster(CephCluster):
                 return c['addrvec']
         return None
 
-    def get_active_id(self):
-        return self.get_mgr_map()["active_name"]
+    def get_active_gid(self, mgr_map = None):
+        if mgr_map is None:
+            mgr_map = self.get_mgr_map()
+        return mgr_map["active_gid"]
+
+    def get_active_id(self, mgr_map = None):
+        if mgr_map is None:
+            mgr_map = self.get_mgr_map()
+        return mgr_map["active_name"]
 
-    def get_standby_ids(self):
-        return [s['name'] for s in self.get_mgr_map()["standbys"]]
+    def get_standby_ids(self, mgr_map = None):
+        if mgr_map is None:
+            mgr_map = self.get_mgr_map()
+        return [s['name'] for s in mgr_map["standbys"]]
 
     def set_module_conf(self, module, key, val):
         self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
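
A hypothetical usage sketch for the helpers added above, as they might appear
inside an MgrTestCase method (the tell command is a placeholder: mgr_tell()
json-decodes whatever the chosen command prints, so it must emit JSON):

    mgr_map = self.mgr_cluster.get_mgr_map()                  # fetch the map once
    active = self.mgr_cluster.get_active_id(mgr_map=mgr_map)  # ...and reuse it
    gid = self.mgr_cluster.get_active_gid(mgr_map=mgr_map)
    standbys = self.mgr_cluster.get_standby_ids(mgr_map=mgr_map)

    self.mgr_cluster.set_down()              # ceph mgr set down true
    # ... reconfigure while no mgr is active ...
    self.mgr_cluster.set_down(yes='false')   # allow a mgr to go active again
    result = self.mgr_cluster.mgr_tell('<json-emitting command>', mgr_id=active)
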
diff --git a/qa/tasks/mgr/test_devicehealth.py b/qa/tasks/mgr/test_devicehealth.py
index d3aa33fc0951ed8c645f69faaf83f64250a34a1b..a02a61fba239af2d2c9e597a0505cba3f749e643 100644
--- a/qa/tasks/mgr/test_devicehealth.py
+++ b/qa/tasks/mgr/test_devicehealth.py
@@ -1,6 +1,8 @@
 from io import StringIO
 import logging
 
+from teuthology.exceptions import CommandFailedError
+
 from .mgr_test_case import MgrTestCase
 
 log = logging.getLogger(__name__)
@@ -14,10 +16,14 @@ class TestDeviceHealth(MgrTestCase):
         self.setup_mgrs()
 
     def tearDown(self):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'set', 'down', 'true')
-        self.mgr_cluster.mon_manager.raw_cluster_cmd('config', 'set', 'mon', 'mon_allow_pool_delete', 'true')
+        self.mgr_cluster.set_down()
+        self.remove_mgr_pool()
+        self.mgr_cluster.set_down(yes='false')
+        return super(TestDeviceHealth, self).tearDown()
+
+    def remove_mgr_pool(self):
+        self.config_set('mon', 'mon_allow_pool_delete', 'true')
         self.mgr_cluster.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', '.mgr', '.mgr', '--yes-i-really-really-mean-it-not-faking')
-        self.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'set', 'down', 'false')
 
     def test_legacy_upgrade_snap(self):
         """
@@ -31,3 +37,70 @@ class TestDeviceHealth(MgrTestCase):
 
         with self.assert_cluster_log("Unhandled exception from module 'devicehealth' while running", present=False):
             self.wait_until_true(lambda: self.mgr_cluster.get_active_id() is not None, timeout=60)
+
+    def _wait_for_killpoint_death(self):
+        # wait for killpoint trigger to kill a mgr
+        def killpoint_dead():
+            for mgr_id, mgr_daemon in self.mgr_cluster.mgr_daemons.items():
+                log.info(f"{mgr_id}")
+                try:
+                    s = mgr_daemon.check_status()
+                    if s is None:
+                        continue
+                    log.info(f"{s}")
+                except CommandFailedError as e:
+                    log.info(f"{e}")
+                    if e.exitstatus == 120:
+                        return True
+                    pass
+            return False
+
+        self.wait_until_true(killpoint_dead, timeout=30)
+        self.mgr_cluster.set_down()
+
+    def test_sql_commit(self):
+        """
+        That commits work.
+        """
+
+        self.mgr_cluster.set_down()
+        self.config_set('mgr', 'mgr/devicehealth/sqlite3_killpoint', 3)
+        self.remove_mgr_pool()
+        self.mgr_cluster.set_down(yes='false')
+
+        self._wait_for_killpoint_death()
+
+        script = """
+            export CEPH_ARGS='--id admin --no-log-to-stderr'
+            sqlite3 -cmd '.load libcephsqlite.so' -cmd '.open file:///.mgr:devicehealth/main.db?vfs=ceph' <<<'.schema'
+        """
+        p = self.mon_manager.controller.run(args=['bash'], stdin=StringIO(script), stdout=StringIO())
+        schema = p.stdout.getvalue().strip()
+        self.assertIn("TABLE MgrModuleKV", schema)
+        self.assertIn("TABLE Device", schema)
+
+    def _test_sql_autocommit(self, kv):
+        """
+        That autocommit is off.
+        """
+
+        self.mgr_cluster.set_down()
+        self.config_set('mgr', 'mgr/devicehealth/sqlite3_killpoint', kv)
+        self.remove_mgr_pool()
+        self.mgr_cluster.set_down(yes='false')
+
+        self._wait_for_killpoint_death()
+
+        script = """
+            export CEPH_ARGS='--id admin --no-log-to-stderr'
+            sqlite3 -cmd '.load libcephsqlite.so' -cmd '.open file:///.mgr:devicehealth/main.db?vfs=ceph' <<<'.schema'
+        """
+        p = self.mon_manager.controller.run(args=['bash'], stdin=StringIO(script), stdout=StringIO())
+        schema = p.stdout.getvalue().strip()
+        self.assertEqual("", schema)
+
+    def test_sql_autocommit1(self):
+        return self._test_sql_autocommit(1)
+
+    def test_sql_autocommit2(self):
+        return self._test_sql_autocommit(2)
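
Taken together, the killpoint tests check the transaction behavior from the
mgr side: with mgr/devicehealth/sqlite3_killpoint set, the active mgr exits
(status 120) at a chosen point and the sqlite3 CLI then inspects what actually
reached RADOS. Killpoints 1 and 2 fire before a commit has landed, so .schema
must come back empty; killpoint 3 fires only after the context-manager commit,
so the MgrModuleKV and Device tables must be present. An illustrative sketch of
the commit pattern being exercised (not the devicehealth module's actual
statements; the key name and column list are assumptions):

    # Illustrative only. With autocommit off, these writes are durable only if
    # the 'with' block commits before the killpoint terminates the mgr
    # (exit status 120).
    with self.db:  # sqlite3.Connection opened over libcephsqlite
        self.db.execute(
            "INSERT OR REPLACE INTO MgrModuleKV (key, value) VALUES (?, ?)",
            ("last_scrape", when),
        )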