%{_datadir}/ceph/mgr/telemetry
%{_datadir}/ceph/mgr/test_orchestrator
%{_datadir}/ceph/mgr/volumes
+%{_datadir}/ceph/mgr/nvmeof
%files mgr-rook
%{_datadir}/ceph/mgr/rook
usr/share/ceph/mgr/telemetry
usr/share/ceph/mgr/test_orchestrator
usr/share/ceph/mgr/volumes
+usr/share/ceph/mgr/nvmeof
first started after installation, to populate the list of enabled manager modules. Subsequent
updates are done using the 'mgr module [enable|disable]' commands. List may be
comma or space separated.
- default: iostat nfs
+ default: iostat nfs nvmeof
services:
- mon
- common
devicehealth
diskprediction_local
# hello is an example for developers, not for user
+ nvmeof
influx
insights
iostat
--- /dev/null
+# flake8: noqa
+from .module import NVMeoF
--- /dev/null
+import logging
+from typing import Any, Tuple
+
+from mgr_module import MgrModule, Option
+import rbd
+
+logger = logging.getLogger(__name__)
+
+# Well-known pool holding NVMe-oF gateway metadata; the leading '.' marks
+# it as an internal/system pool.
+POOL_NAME = ".nvmeof"
+# Metadata-only pool, so a single placement group is sufficient.
+PG_NUM = 1
+
+class NVMeoF(MgrModule):
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super(NVMeoF, self).__init__(*args, **kwargs)
+
+ def _print_log(self, ret: int, out: str, err: str, cmd: str) -> None:
+ logger.info(f"logging command: {cmd} *** ret:{str(ret)}, out:{out}, err: {err}")
+
+ def _mon_cmd(self, cmd: dict) -> Tuple[int, str, str]:
+ ret, out, err = self.mon_command(cmd)
+ self._print_log(ret, out, err, cmd)
+ if ret != 0:
+ raise RuntimeError(f"mon_command failed: {cmd}, ret={ret}, out={out}, err={err}")
+ return ret, out, err
+
+ def _pool_exists(self, pool_name: str) -> bool:
+ logger.info(f"checking if pool {pool_name} exists")
+ pool_exists = self.rados.pool_exists(pool_name)
+ if pool_exists:
+ logger.info(f"pool {pool_name} already exists")
+ else:
+ logger.info(f"pool {pool_name} doesn't exist")
+ return pool_exists
+
+ def _create_pool(self, pool_name: str, pg_num: int) -> None:
+ create_cmd = {
+ 'prefix': 'osd pool create',
+ 'pool': pool_name,
+ 'pg_num': pg_num,
+ 'pool_type': 'replicated',
+ 'yes_i_really_mean_it': True
+ }
+ try:
+ self._mon_cmd(create_cmd)
+ logger.info(f"Pool '{pool_name}' created.")
+ except RuntimeError as e:
+ logger.error(f"Error creating pool '{pool_name}", exc_info=True)
+ raise
+
+ def _enable_rbd_application(self, pool_name: str) -> None:
+ cmd = {
+ 'prefix': 'osd pool application enable',
+ 'pool': pool_name,
+ 'app': 'rbd',
+ }
+ try:
+ self._mon_cmd(cmd)
+ logger.info(f"'rbd' application enabled on pool '{pool_name}'.")
+ except RuntimeError as e:
+ logger.error(
+ f"Failed to enable 'rbd' application on '{pool_name}'",
+ exc_info=True
+ )
+ raise
+
+ def _rbd_pool_init(self, pool_name: str) -> None:
+ with self.rados.open_ioctx(pool_name) as ioctx:
+ rbd.RBD().pool_init(ioctx, False)
+ logger.info(f"RBD pool_init completed on '{pool_name}'.")
+
+ def create_pool_if_not_exists(self) -> None:
+ if not self._pool_exists(POOL_NAME):
+ self._create_pool(POOL_NAME, PG_NUM)
+ self._enable_rbd_application(POOL_NAME)
+ self._rbd_pool_init(POOL_NAME)
+
--- /dev/null
+
+from contextlib import contextmanager
+from unittest.mock import MagicMock
+
+import nvmeof.module as nvmeof_mod
+
+
+class FakeRados:
+ """Minimal stand-in for the mgr's rados handle.
+
+ Reports a fixed answer for pool_exists() and records every pool
+ opened via open_ioctx() so tests can assert on it.
+ """
+
+ def __init__(self, exists: bool):
+ self._exists = exists
+ self.opened_pools = []
+
+ def pool_exists(self, pool_name: str) -> bool:
+ # Always the canned answer configured at construction time.
+ return self._exists
+
+ @contextmanager
+ def open_ioctx(self, pool_name: str):
+ # Remember the pool name and yield a dummy ioctx object.
+ self.opened_pools.append(pool_name)
+ yield object()
+
+
+def patch_rbd_pool_init(monkeypatch):
+ # Replace the rbd.RBD factory with one that always returns the same
+ # MagicMock, so tests can assert on pool_init() without a cluster.
+ rbd_instance = MagicMock()
+ monkeypatch.setattr(nvmeof_mod.rbd, "RBD", lambda: rbd_instance)
+ return rbd_instance
+
+
+def make_mgr(mon_handler, exists: bool, monkeypatch):
+ """Build an NVMeoF instance without running MgrModule.__init__.
+
+ mon_command is replaced by *mon_handler*, and rados access is routed
+ through a FakeRados that reports *exists* from pool_exists().
+ """
+ # __new__ skips MgrModule.__init__ (which needs a live mgr context).
+ mgr = nvmeof_mod.NVMeoF.__new__(nvmeof_mod.NVMeoF)
+ mgr.mon_command = mon_handler
+ mgr._print_log = lambda *args, **kwargs: None
+ mgr.run = False
+
+ mgr._fake_rados = FakeRados(exists)
+
+ def _pool_exists(self, pool_name: str) -> bool:
+ return self._fake_rados.pool_exists(pool_name)
+
+ def _rbd_pool_init(self, pool_name: str):
+ with self._fake_rados.open_ioctx(pool_name) as ioctx:
+ nvmeof_mod.rbd.RBD().pool_init(ioctx, False)
+
+ # Patch at class level so the stand-ins above replace the real
+ # implementations that would talk to rados.
+ monkeypatch.setattr(nvmeof_mod.NVMeoF, "_pool_exists", _pool_exists, raising=True)
+ monkeypatch.setattr(nvmeof_mod.NVMeoF, "_rbd_pool_init", _rbd_pool_init, raising=True)
+
+ return mgr
+
+
+def test_pool_exists_skips_create_calls_enable_and_pool_init(monkeypatch):
+ # Pool already exists: no 'osd pool create', but application-enable
+ # and RBD pool_init must still run.
+ calls = []
+
+ def mon_command(cmd):
+ calls.append(cmd)
+ return 0, "", ""
+
+ rbd_instance = patch_rbd_pool_init(monkeypatch)
+ mgr = make_mgr(mon_command, exists=True, monkeypatch=monkeypatch)
+
+ mgr.create_pool_if_not_exists()
+
+ assert not any(c.get("prefix") == "osd pool create" for c in calls)
+ assert any(c.get("prefix") == "osd pool application enable" for c in calls)
+
+ assert mgr._fake_rados.opened_pools == [".nvmeof"]
+ rbd_instance.pool_init.assert_called_once()
+
+
+def test_pool_missing_creates_then_enables_then_pool_init(monkeypatch):
+ # Pool missing: 'osd pool create' must run, followed by
+ # application-enable and RBD pool_init on the '.nvmeof' pool.
+ calls = []
+
+ def mon_command(cmd):
+ calls.append(cmd)
+ return 0, "", ""
+
+ rbd_instance = patch_rbd_pool_init(monkeypatch)
+ mgr = make_mgr(mon_command, exists=False, monkeypatch=monkeypatch)
+
+ mgr.create_pool_if_not_exists()
+
+ assert any(c.get("prefix") == "osd pool create" for c in calls)
+ assert any(c.get("prefix") == "osd pool application enable" for c in calls)
+
+ assert mgr._fake_rados.opened_pools == [".nvmeof"]
+ rbd_instance.pool_init.assert_called_once()
return self._apply_misc([spec], dry_run, format, no_overwrite)
+ def _create_nvmeof_metadata_pool_if_needed(self) -> None:
+ # Delegate to the nvmeof mgr module so the '.nvmeof' metadata pool
+ # is created lazily on first 'orch apply nvmeof'.
+ self.remote('nvmeof', 'create_pool_if_not_exists')
+
@OrchestratorCLICommand.Write('orch apply nvmeof')
def _apply_nvmeof(self,
- pool: str,
- group: str,
+ _end_positional_: int = 0,
+ pool: str = ".nvmeof",
+ group: str = '',
placement: Optional[str] = None,
unmanaged: bool = False,
dry_run: bool = False,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Scale an nvmeof service"""
+ if group == '':
+ raise OrchestratorValidationError('The --group argument is required')
+
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
+ if pool == ".nvmeof":
+ self._create_nvmeof_metadata_pool_if_needed()
+
+ cleanpool = pool.lstrip('.')
spec = NvmeofServiceSpec(
- service_id=f'{pool}.{group}' if group else pool,
+ service_id=f'{cleanpool}.{group}' if group else cleanpool,
pool=pool,
group=group,
placement=PlacementSpec.from_string(placement),
-m devicehealth \
-m diskprediction_local \
-m hello \
+ -m nvmeof \
-m influx \
-m iostat \
-m localpool \
devicehealth \
diskprediction_local \
hello \
+ nvmeof \
iostat \
localpool \
mgr_module.py \