            mgr=self
        )
-    def add_nfs(self, spec):
-        # type: (NFSServiceSpec) -> RookCompletion
-        return self._service_add_decorate("NFS", spec,
-                                          self.rook_cluster.add_nfsgw)
-
    def _service_rm_decorate(self, typename, name, func):
        return write_completion(
            on_complete=lambda : func(name),
    def apply_nfs(self, spec):
        # type: (NFSServiceSpec) -> RookCompletion
-        num = spec.placement.count
-        return write_completion(
-            lambda: self.rook_cluster.update_nfs_count(spec.service_id, num),
-            "Updating NFS server count in {0} to {1}".format(spec.service_id, num),
-            mgr=self
-        )
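+        # Delegate to the shared service-add completion wrapper; the actual
+        # create-or-update logic lives in RookCluster.apply_nfsgw() below.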
+        return self._service_add_decorate("NFS", spec,
+                                          self.rook_cluster.apply_nfsgw)
    def remove_daemons(self, names):
        return write_completion(
from urllib3.exceptions import ProtocolError
from ceph.deployment.drive_group import DriveGroupSpec
-from ceph.deployment.service_spec import ServiceSpec
+from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec
from mgr_util import merge_dicts
try:
            cos.CephObjectStore, 'cephobjectstores', name,
            _update_zone, _create_zone)
-    def add_nfsgw(self, spec):
+    def apply_nfsgw(self, spec: NFSServiceSpec) -> None:
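+        # Create the CephNFS custom resource for this spec, or update the
+        # existing one in place.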
        # TODO use spec.placement
        # TODO warn if spec.extended has entries we don't know how
        # to action.
+        # TODO Number of pods should be based on the list of hosts in the
+        #      PlacementSpec.
+        count = spec.placement.count or 1
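+        # Patch path: on an existing CephNFS resource only the number of
+        # active NFS server pods needs updating.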
+        def _update_nfs(new: cnfs.CephNFS) -> cnfs.CephNFS:
+            new.spec.server.active = count
+            return new
-        rook_nfsgw = cnfs.CephNFS(
-            apiVersion=self.rook_env.api_name,
-            metadata=dict(
-                name=spec.service_id,
-                namespace=self.rook_env.namespace,
-            ),
-            spec=cnfs.Spec(
-                rados=cnfs.Rados(
-                    pool=spec.pool
-                ),
-                server=cnfs.Server(
-                    active=spec.placement.count
-                )
-            )
-        )
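+
+        # Create path: build a fresh CephNFS resource from the service spec.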
+        def _create_nfs() -> cnfs.CephNFS:
+            rook_nfsgw = cnfs.CephNFS(
+                apiVersion=self.rook_env.api_name,
+                metadata=dict(
+                    name=spec.service_id,
+                    namespace=self.rook_env.namespace,
+                ),
+                spec=cnfs.Spec(
+                    rados=cnfs.Rados(
+                        pool=spec.pool
+                    ),
+                    server=cnfs.Server(
+                        active=count
+                    )
+                )
+            )
+
+            if spec.namespace:
+                rook_nfsgw.spec.rados.namespace = spec.namespace
-        if spec.namespace:
-            rook_nfsgw.spec.rados.namespace = spec.namespace
+            return rook_nfsgw
-        with self.ignore_409("NFS cluster '{0}' already exists".format(spec.service_id)):
-            self.rook_api_post("cephnfses/", body=rook_nfsgw.to_json())
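+        # _create_or_patch() patches the existing CephNFS resource via
+        # _update_nfs, or creates a new one from _create_nfs if none exists.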
+        return self._create_or_patch(cnfs.CephNFS, 'cephnfses', spec.service_id,
+                                     _update_nfs, _create_nfs)
    def rm_service(self, rooktype, service_id):
            return new
        return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _update_mon_count)
-    def update_nfs_count(self, svc_id, newcount):
-        def _update_nfs_count(current, new):
-            # type: (cnfs.CephNFS, cnfs.CephNFS) -> cnfs.CephNFS
-            new.spec.server.active = newcount
-            return new
-        return self._patch(cnfs.CephNFS, 'cephnfses',svc_id, _update_nfs_count)
-
    def add_osds(self, drive_group, matching_hosts):
        # type: (DriveGroupSpec, List[str]) -> str
        """