git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/volumes: add arg to fs volume create for mds daemons placement
author: Daniel-Pivonka <dpivonka@redhat.com>
Thu, 13 Feb 2020 18:56:13 +0000 (13:56 -0500)
committer: Daniel-Pivonka <dpivonka@redhat.com>
Thu, 20 Feb 2020 19:46:17 +0000 (14:46 -0500)
add placement arg for mds daemons created by fs volume create

Signed-off-by: Daniel-Pivonka <dpivonka@redhat.com>
doc/cephfs/fs-volumes.rst
src/pybind/mgr/volumes/fs/fs_util.py
src/pybind/mgr/volumes/fs/operations/volume.py
src/pybind/mgr/volumes/fs/volume.py
src/pybind/mgr/volumes/module.py

index 18404ecc80b4f5141c929813f6fc5f2120e7e930..6cd5e7c15dd9e10a2cf481b63dcfa8c7f0f59418 100644 (file)
@@ -43,7 +43,7 @@ FS Volumes
 
 Create a volume using::
 
-    $ ceph fs volume create <vol_name>
+    $ ceph fs volume create <vol_name> [<placement>]
 
 This creates a CephFS file system and its data and metadata pools. It also tries
 to create MDSes for the filesystem using the enabled ceph-mgr orchestrator
index bf3fd751864a882c003aac49790fc1b11e121087..c0e7e338aed2116da9c13974bff9223465dfef9b 100644 (file)
@@ -33,8 +33,8 @@ def remove_filesystem(mgr, fs_name):
     command = {'prefix': 'fs rm', 'fs_name': fs_name, 'yes_i_really_mean_it': True}
     return mgr.mon_command(command)
 
-def create_mds(mgr, fs_name):
-    spec = orchestrator.ServiceSpec(fs_name)
+def create_mds(mgr, fs_name, placement):
+    spec = orchestrator.ServiceSpec(fs_name, orchestrator.PlacementSpec.from_strings(placement.split()))
     try:
         completion = mgr.apply_mds(spec)
         mgr._orchestrator_wait([completion])
index 7e6bf2b15a591d4bc0dc59a16cdd2640d00e49f2..d3b295ba5db77d00cbb3620f5cc995a1670f234a 100644 (file)
@@ -197,7 +197,7 @@ def gen_pool_names(volname):
     """
     return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
 
-def create_volume(mgr, volname):
+def create_volume(mgr, volname, placement):
     """
     create volume  (pool, filesystem and mds)
     """
@@ -220,7 +220,7 @@ def create_volume(mgr, volname):
         remove_pool(metadata_pool)
         return r, outb, outs
     # create mds
-    return create_mds(mgr, volname)
+    return create_mds(mgr, volname, placement)
 
 def delete_volume(mgr, volname):
     """
index a10488d12923da3f82e770c9daf8bd677f19380c..798f1f07b9a96c57284a388ed3afe57997a4eaa0 100644 (file)
@@ -83,10 +83,10 @@ class VolumeClient(object):
 
     ### volume operations -- create, rm, ls
 
-    def create_fs_volume(self, volname):
+    def create_fs_volume(self, volname, placement):
         if self.is_stopping():
             return -errno.ESHUTDOWN, "", "shutdown in progress"
-        return create_volume(self.mgr, volname)
+        return create_volume(self.mgr, volname, placement)
 
     def delete_fs_volume(self, volname, confirm):
         if self.is_stopping():
index bbde50080131f3f2d8bb5bd6a06cb7aee20a29d7..ae3033b7bc6661c33f5dd0fb1f39cd7f6f9d8cf0 100644 (file)
@@ -15,7 +15,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
         },
         {
             'cmd': 'fs volume create '
-                   'name=name,type=CephString ',
+                   'name=name,type=CephString '
+                   'name=placement,type=CephString,req=false ',
             'desc': "Create a CephFS volume",
             'perm': 'rw'
         },
@@ -242,7 +243,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
 
     def _cmd_fs_volume_create(self, inbuf, cmd):
         vol_id = cmd['name']
-        return self.vc.create_fs_volume(vol_id)
+        placement = cmd.get('placement', None)
+        return self.vc.create_fs_volume(vol_id, placement)
 
     def _cmd_fs_volume_rm(self, inbuf, cmd):
         vol_name = cmd['vol_name']