]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/nvmeof: introduce nvmeof module and create .nvmeof rbd pool on orch nvmeof apply
authorTomer Haskalovitch <tomer.haska@ibm.com>
Sun, 23 Nov 2025 07:46:30 +0000 (09:46 +0200)
committerTomer Haskalovitch <tomer.haska@ibm.com>
Thu, 19 Feb 2026 01:16:36 +0000 (03:16 +0200)
Introduce a new NVMe-oF mgr module which creates the pool
used for storing NVMe-related metadata on the ceph orch nvmeof apply command.
This removes the need for users to manually create and configure the
metadata pool before using the NVMe-oF functionality, simplifying
setup and reducing the chance of misconfiguration.
Signed-off-by: Tomer Haskalovitch <tomer.haska@ibm.com>
ceph.spec.in
debian/ceph-mgr-modules-core.install
src/common/options/mgr.yaml.in
src/pybind/mgr/CMakeLists.txt
src/pybind/mgr/nvmeof/__init__.py [new file with mode: 0644]
src/pybind/mgr/nvmeof/module.py [new file with mode: 0644]
src/pybind/mgr/nvmeof/tests/__init__.py [new file with mode: 0644]
src/pybind/mgr/nvmeof/tests/test_nvmeof_module.py [new file with mode: 0644]
src/pybind/mgr/orchestrator/module.py
src/pybind/mgr/tox.ini

index 18cd57b26833e0181627f59ff6f3c901c0b6d298..f0f6a50c56daa09326736bf7d10fdb71dace3195 100644 (file)
@@ -2015,6 +2015,7 @@ fi
 %{_datadir}/ceph/mgr/telemetry
 %{_datadir}/ceph/mgr/test_orchestrator
 %{_datadir}/ceph/mgr/volumes
+%{_datadir}/ceph/mgr/nvmeof
 
 %files mgr-rook
 %{_datadir}/ceph/mgr/rook
index 90359a8e3e73f7e3f3fe45e7be845dfea15133d4..17ed984b02f613b4dde38d55924d7979520cb35f 100644 (file)
@@ -25,3 +25,4 @@ usr/share/ceph/mgr/telegraf
 usr/share/ceph/mgr/telemetry
 usr/share/ceph/mgr/test_orchestrator
 usr/share/ceph/mgr/volumes
+usr/share/ceph/mgr/nvmeof
index c6bdee1d156dad88ea09473ce6d421396a24090c..4a21104c47110ef02a7b48350ef70dcb6d772a87 100644 (file)
@@ -153,7 +153,7 @@ options:
     first started after installation, to populate the list of enabled manager modules.  Subsequent
     updates are done using the 'mgr module [enable|disable]' commands.  List may be
     comma or space separated.
-  default: iostat nfs
+  default: iostat nfs nvmeof
   services:
   - mon
   - common
index 9e900f859d701c1536b98266772eaff008ee3327..d4032eb1dd7c227e33669e50448cba3134a73e73 100644 (file)
@@ -27,6 +27,7 @@ set(mgr_modules
   devicehealth
   diskprediction_local
   # hello is an example for developers, not for user
+  nvmeof
   influx
   insights
   iostat
diff --git a/src/pybind/mgr/nvmeof/__init__.py b/src/pybind/mgr/nvmeof/__init__.py
new file mode 100644 (file)
index 0000000..33b8e63
--- /dev/null
@@ -0,0 +1,2 @@
+# flake8: noqa
+from .module import NVMeoF
diff --git a/src/pybind/mgr/nvmeof/module.py b/src/pybind/mgr/nvmeof/module.py
new file mode 100644 (file)
index 0000000..cc62d70
--- /dev/null
@@ -0,0 +1,76 @@
+import logging
+from typing import Any, Tuple
+
+from mgr_module import MgrModule, Option
+import rbd
+
+logger = logging.getLogger(__name__)
+
+POOL_NAME = ".nvmeof"
+PG_NUM = 1
+
class NVMeoF(MgrModule):
    """Mgr module that provisions the ``.nvmeof`` RBD pool.

    The pool holds NVMe-oF metadata; creating it here (triggered remotely by
    ``ceph orch nvmeof apply``) removes the need for users to create and
    configure the metadata pool by hand.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    def _print_log(self, ret: int, out: str, err: str, cmd: dict) -> None:
        # Record every mon command and its full result for troubleshooting.
        # (annotation fixed: both call sites pass the command dict, not a str)
        logger.info(f"logging command: {cmd} *** ret:{str(ret)}, out:{out}, err: {err}")

    def _mon_cmd(self, cmd: dict) -> Tuple[int, str, str]:
        """Send *cmd* to the monitors; raise RuntimeError on a non-zero return.

        Always logs the command and its result, success or failure.
        """
        ret, out, err = self.mon_command(cmd)
        self._print_log(ret, out, err, cmd)
        if ret != 0:
            raise RuntimeError(f"mon_command failed: {cmd}, ret={ret}, out={out}, err={err}")
        return ret, out, err

    def _pool_exists(self, pool_name: str) -> bool:
        """Return True when *pool_name* already exists in the cluster."""
        logger.info(f"checking if pool {pool_name} exists")
        pool_exists = self.rados.pool_exists(pool_name)
        if pool_exists:
            logger.info(f"pool {pool_name} already exists")
        else:
            logger.info(f"pool {pool_name} doesn't exist")
        return pool_exists

    def _create_pool(self, pool_name: str, pg_num: int) -> None:
        """Create a replicated pool *pool_name* with *pg_num* PGs.

        Raises RuntimeError (from _mon_cmd) when the mon command fails.
        """
        create_cmd = {
            'prefix': 'osd pool create',
            'pool': pool_name,
            'pg_num': pg_num,
            'pool_type': 'replicated',
            'yes_i_really_mean_it': True
        }
        try:
            self._mon_cmd(create_cmd)
            logger.info(f"Pool '{pool_name}' created.")
        except RuntimeError:
            # Bug fix: the original log message was missing the closing quote
            # around the pool name.  Unused `as e` binding dropped; the
            # traceback is already attached via exc_info.
            logger.error(f"Error creating pool '{pool_name}'", exc_info=True)
            raise

    def _enable_rbd_application(self, pool_name: str) -> None:
        """Tag *pool_name* with the 'rbd' application via a mon command."""
        cmd = {
            'prefix': 'osd pool application enable',
            'pool': pool_name,
            'app': 'rbd',
        }
        try:
            self._mon_cmd(cmd)
            logger.info(f"'rbd' application enabled on pool '{pool_name}'.")
        except RuntimeError:
            logger.error(
                f"Failed to enable 'rbd' application on '{pool_name}'",
                exc_info=True
            )
            raise

    def _rbd_pool_init(self, pool_name: str) -> None:
        """Run RBD pool initialization on *pool_name* (force=False)."""
        with self.rados.open_ioctx(pool_name) as ioctx:
            rbd.RBD().pool_init(ioctx, False)
        logger.info(f"RBD pool_init completed on '{pool_name}'.")

    def create_pool_if_not_exists(self) -> None:
        """Ensure POOL_NAME exists, carries the 'rbd' app, and is RBD-initialized.

        Invoked remotely by the orchestrator on ``orch apply nvmeof``.  The
        app-enable and pool_init steps run even when the pool already exists
        (assumed safe to re-run -- TODO confirm both are idempotent).
        """
        if not self._pool_exists(POOL_NAME):
            self._create_pool(POOL_NAME, PG_NUM)
        self._enable_rbd_application(POOL_NAME)
        self._rbd_pool_init(POOL_NAME)
+
diff --git a/src/pybind/mgr/nvmeof/tests/__init__.py b/src/pybind/mgr/nvmeof/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/pybind/mgr/nvmeof/tests/test_nvmeof_module.py b/src/pybind/mgr/nvmeof/tests/test_nvmeof_module.py
new file mode 100644 (file)
index 0000000..28eaf4b
--- /dev/null
@@ -0,0 +1,84 @@
+
+from contextlib import contextmanager
+from unittest.mock import MagicMock
+
+import nvmeof.module as nvmeof_mod
+
+
class FakeRados:
    """Minimal stand-in for the mgr's rados handle.

    Answers ``pool_exists`` with a fixed value and keeps a record of every
    pool name opened through ``open_ioctx``.
    """

    def __init__(self, exists: bool):
        self._has_pool = exists
        self.opened_pools: list = []

    def pool_exists(self, pool_name: str) -> bool:
        # Fixed answer regardless of the pool name queried.
        return self._has_pool

    @contextmanager
    def open_ioctx(self, pool_name: str):
        # Remember which pool was opened, hand back a dummy ioctx.
        self.opened_pools.append(pool_name)
        yield object()
+
+
def patch_rbd_pool_init(monkeypatch):
    """Swap nvmeof.module's rbd.RBD factory for a MagicMock; return the mock."""
    mock_rbd = MagicMock()
    monkeypatch.setattr(nvmeof_mod.rbd, "RBD", lambda: mock_rbd)
    return mock_rbd
+
+
def make_mgr(mon_handler, exists: bool, monkeypatch):
    """Build an NVMeoF instance wired to fakes, bypassing __init__.

    ``mon_command`` is replaced by *mon_handler*, logging is silenced, and the
    rados-backed helpers are redirected to a FakeRados (pre-seeded so that
    pool_exists answers *exists*) via pytest's monkeypatch fixture.
    """
    mgr = nvmeof_mod.NVMeoF.__new__(nvmeof_mod.NVMeoF)
    mgr.mon_command = mon_handler
    mgr._print_log = lambda *_args, **_kwargs: None
    mgr.run = False
    mgr._fake_rados = FakeRados(exists)

    def fake_pool_exists(self, pool_name: str) -> bool:
        return self._fake_rados.pool_exists(pool_name)

    def fake_rbd_pool_init(self, pool_name: str):
        with self._fake_rados.open_ioctx(pool_name) as ioctx:
            nvmeof_mod.rbd.RBD().pool_init(ioctx, False)

    # Patch at class level; monkeypatch restores the originals after the test.
    monkeypatch.setattr(nvmeof_mod.NVMeoF, "_pool_exists", fake_pool_exists, raising=True)
    monkeypatch.setattr(nvmeof_mod.NVMeoF, "_rbd_pool_init", fake_rbd_pool_init, raising=True)

    return mgr
+
+
def test_pool_exists_skips_create_calls_enable_and_pool_init(monkeypatch):
    """Existing pool: no create command, but enable and pool_init still run."""
    issued = []

    def fake_mon_command(cmd):
        issued.append(cmd)
        return 0, "", ""

    mock_rbd = patch_rbd_pool_init(monkeypatch)
    mgr = make_mgr(fake_mon_command, exists=True, monkeypatch=monkeypatch)

    mgr.create_pool_if_not_exists()

    prefixes = [c.get("prefix") for c in issued]
    assert "osd pool create" not in prefixes
    assert "osd pool application enable" in prefixes

    assert mgr._fake_rados.opened_pools == [".nvmeof"]
    mock_rbd.pool_init.assert_called_once()
+
+
def test_pool_missing_creates_then_enables_then_pool_init(monkeypatch):
    """Missing pool: create is issued, then enable, then RBD pool_init."""
    issued = []

    def fake_mon_command(cmd):
        issued.append(cmd)
        return 0, "", ""

    mock_rbd = patch_rbd_pool_init(monkeypatch)
    mgr = make_mgr(fake_mon_command, exists=False, monkeypatch=monkeypatch)

    mgr.create_pool_if_not_exists()

    prefixes = [c.get("prefix") for c in issued]
    assert "osd pool create" in prefixes
    assert "osd pool application enable" in prefixes

    assert mgr._fake_rados.opened_pools == [".nvmeof"]
    mock_rbd.pool_init.assert_called_once()
index 5fc2fce63fae5501964671fd3f3264a62d2bff69..0194d54abeadd65d8e9a0cff100671ecc20d1a78 100644 (file)
@@ -2109,10 +2109,14 @@ Usage:
 
         return self._apply_misc([spec], dry_run, format, no_overwrite)
 
    def _create_nvmeof_metadata_pool_if_needed(self) -> None:
        # Ask the nvmeof mgr module (via mgr remote call) to create the
        # .nvmeof metadata pool if it does not already exist.
        self.remote('nvmeof', 'create_pool_if_not_exists')
+
     @OrchestratorCLICommand.Write('orch apply nvmeof')
     def _apply_nvmeof(self,
-                      pool: str,
-                      group: str,
+                      _end_positional_: int = 0,
+                      pool: str = ".nvmeof",
+                      group: str = '',
                       placement: Optional[str] = None,
                       unmanaged: bool = False,
                       dry_run: bool = False,
@@ -2120,11 +2124,18 @@ Usage:
                       no_overwrite: bool = False,
                       inbuf: Optional[str] = None) -> HandleCommandResult:
         """Scale an nvmeof service"""
+        if group == '':
+            raise OrchestratorValidationError('The --group argument is required')
+
         if inbuf:
             raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
 
+        if pool == ".nvmeof":
+            self._create_nvmeof_metadata_pool_if_needed()
+
+        cleanpool = pool.lstrip('.')
         spec = NvmeofServiceSpec(
-            service_id=f'{pool}.{group}' if group else pool,
+            service_id=f'{cleanpool}.{group}' if group else cleanpool,
             pool=pool,
             group=group,
             placement=PlacementSpec.from_string(placement),
index c2deb627261ecb3f50da391ff5b68e90205ff88b..35d7f972d00e12f78346c76d8039a8c6bf5e77fd 100644 (file)
@@ -93,6 +93,7 @@ commands =
            -m devicehealth \
            -m diskprediction_local \
            -m hello \
+           -m nvmeof \
            -m influx \
            -m iostat \
            -m localpool \
@@ -146,6 +147,7 @@ modules =
     devicehealth \
     diskprediction_local \
     hello \
+    nvmeof \
     iostat \
     localpool \
     mgr_module.py \