mgr/volumes: set the 'bulk' flag for data pools created automatically for a new volume
author     Leonid Usov <leonid.usov@ibm.com>
           Thu, 3 Aug 2023 16:41:12 +0000 (19:41 +0300)
committer  Leonid Usov <leonid.usov@ibm.com>
           Thu, 3 Aug 2023 16:41:12 +0000 (19:41 +0300)
Fixes: https://tracker.ceph.com/issues/61595
Signed-off-by: Leonid Usov <leonid.usov@ibm.com>
qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/fs_util.py
src/pybind/mgr/volumes/fs/operations/volume.py
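
A pool created with the 'bulk' flag tells the pg_autoscaler to start the
pool with a full complement of placement groups and only scale down when
usage across pools becomes skewed, which is the right default for a CephFS
data pool. The sketch below (not part of the patch, with a hypothetical
pool name) shows the mon command the volumes plugin now issues for the data
pool and its CLI equivalent:

    # Sketch: the mon command sent for the data pool once bulk=True is passed.
    command = {
        'prefix': 'osd pool create',
        'pool': 'cephfs.myvol.data',  # hypothetical pool name
        'bulk': True,
    }
    # CLI equivalent:
    #   ceph osd pool create cephfs.myvol.data --bulk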

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index f5f5c15fba1334251c7e180e5cfdf2f7ad51242b..27a7e6f5ba60a17d6a1c59c08dcc7371d93113de 100644
@@ -442,9 +442,21 @@ class TestVolumes(TestVolumesHelper):
 
         if not (volname in ([volume['name'] for volume in volumels])):
             raise RuntimeError("Error creating volume '{0}'".format(volname))
-        else:
-            # clean up
-            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
+
+        # check that the pools were created with the correct config
+        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
+        pool_flags = {}
+        for pool in pool_details:
+            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")
+
+        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
+        for data_pool_id in volume_details['mdsmap']['data_pools']:
+            self.assertIn("bulk", pool_flags[data_pool_id])
+        meta_pool_id = volume_details['mdsmap']['metadata_pool']
+        self.assertNotIn("bulk", pool_flags[meta_pool_id])
+
+        # clean up
+        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
 
     def test_volume_ls(self):
         """
diff --git a/src/pybind/mgr/volumes/fs/fs_util.py b/src/pybind/mgr/volumes/fs/fs_util.py
index be0a06acdd8997d2d7de06dffe653038e6d58c39..0b9382fc3e138920022015cb0b2f0306d98a8d77 100644
@@ -11,9 +11,10 @@ from .exception import VolumeException
 
 log = logging.getLogger(__name__)
 
-def create_pool(mgr, pool_name):
+def create_pool(mgr, pool_name, **extra_args):
     # create the given pool
-    command = {'prefix': 'osd pool create', 'pool': pool_name}
+    command = extra_args
+    command.update({'prefix': 'osd pool create', 'pool': pool_name})
     return mgr.mon_command(command)
 
 def remove_pool(mgr, pool_name):
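
Because `command.update(...)` runs after `command = extra_args`, the
'prefix' and 'pool' keys always win, so a caller cannot override them; and
since `**extra_args` materializes a fresh dict on every call, mutating
`command` never leaks back to the caller. A small illustration with a
purely hypothetical manager stub:

    class FakeMgr:
        # Illustrative stand-in that just echoes the command dict.
        def mon_command(self, command):
            return 0, str(command), ''

    _, outb, _ = create_pool(FakeMgr(), 'cephfs.a.data', bulk=True)
    # outb == "{'bulk': True, 'prefix': 'osd pool create', 'pool': 'cephfs.a.data'}"
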
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index e6e374992fb0bf6bd090a117bfaf870743f7cfed..5cebe5c3143bd83dc36586b2080219fa209244fa 100644
@@ -80,7 +80,9 @@ def create_volume(mgr, volname, placement):
     r, outb, outs = create_pool(mgr, metadata_pool)
     if r != 0:
         return r, outb, outs
-    r, outb, outs = create_pool(mgr, data_pool)
+    # default to a bulk pool for data. In case autoscaling has been disabled
+    # for the cluster with `ceph osd pool set noautoscale`, this will have no effect.
+    r, outb, outs = create_pool(mgr, data_pool, bulk=True)
     if r != 0:
         #cleanup
         remove_pool(mgr, metadata_pool)
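
Volumes created before this change will not have the flag on their data
pool; it can be applied retroactively. A sketch of the equivalent mon
command (pool name hypothetical):

    # Retrofit the flag on an existing data pool via the same mon interface.
    command = {
        'prefix': 'osd pool set',
        'pool': 'cephfs.oldvol.data',  # hypothetical pool name
        'var': 'bulk',
        'val': 'true',
    }
    # CLI equivalent:
    #   ceph osd pool set cephfs.oldvol.data bulk true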