mgr/cephadm: Fix count for OSDs with OSD specs (44629/head)
author    Sebastian Wagner <sewagner@redhat.com>
          Mon, 20 Dec 2021 15:08:07 +0000 (16:08 +0100)
committer Adam King <adking@redhat.com>
          Mon, 21 Feb 2022 19:56:06 +0000 (14:56 -0500)
osd counting is special: an OSD spec (drive group) selects devices rather
than declaring a daemon count, so the service size cannot be taken from
the placement spec.

Signed-off-by: Sebastian Wagner <sewagner@redhat.com>
Conflicts:
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/fixtures.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
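Why osd counting is special (editor's sketch, not part of the commit): for
most service types the placement spec itself declares how many daemons to
run, so placement.get_target_count() is a meaningful size. A drive group
instead matches devices, and the number of OSDs it yields is only known
after ceph-volume has consumed those devices, so the size has to come from
the daemons that actually exist. A minimal illustration, with hypothetical
spec values:

    # Illustration only; the concrete spec values are hypothetical.
    from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
    from ceph.deployment.drive_group import DeviceSelection, DriveGroupSpec

    mgr = ServiceSpec('mgr', placement=PlacementSpec(count=3))
    # The placement itself fixes the size: get_target_count(...) would
    # report 3 here.

    osd = DriveGroupSpec(service_id='default',
                         data_devices=DeviceSelection(all=True))
    # This matches *devices*, not a daemon count: the number of OSDs
    # depends on how many disks each matched host has, so the size can
    # only be derived from deployed daemons, not from the spec.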

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 0b7a3edd2f8e72badd6ec3f6f370ce2a24ab3c44..1c0e37af0edf16365aa878ca352da97989ca3d69 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -1868,9 +1868,16 @@ Then run the following:
                 continue
             if service_name is not None and service_name != nm:
                 continue
+
+            if spec.service_type != 'osd':
+                size = spec.placement.get_target_count(self._schedulable_hosts())
+            else:
+                # osd counting is special
+                size = 0
+
             sm[nm] = orchestrator.ServiceDescription(
                 spec=spec,
-                size=spec.placement.get_target_count(self._schedulable_hosts()),
+                size=size,
                 running=0,
                 events=self.events.get_for_service(spec.service_name()),
                 created=self.spec_store.spec_created[nm],
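The hunk deliberately leaves size=0 for OSD services; the real size
presumably gets filled in later from the daemon inventory rather than from
the spec. One plausible shape of that follow-up (an assumption, not shown
in this diff; daemons stands for the cached DaemonDescription list):

    # Assumed follow-up sketch, not part of this commit: size OSD
    # services by counting their deployed daemons.
    for dd in daemons:
        if dd.daemon_type == 'osd' and dd.service_name() in sm:
            sm[dd.service_name()].size += 1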
diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py
index 20f26766340ed70d3007dc2593e419b8a4d4a1dc..acacfb8ab4337395d1153d2d40b8827a6f38d291 100644
--- a/src/pybind/mgr/cephadm/tests/fixtures.py
+++ b/src/pybind/mgr/cephadm/tests/fixtures.py
@@ -123,7 +123,7 @@ def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth=No
 
     dds = wait(cephadm_module, cephadm_module.list_daemons())
     own_dds = [dd for dd in dds if dd.service_name() == spec.service_name()]
-    if host:
+    if host and spec.service_type != 'osd':
         assert own_dds
 
     yield [dd.name() for dd in own_dds]
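The relaxed assertion reflects that, in these mocked tests, applying a
drive group does not by itself create OSD daemons: OSDs are created by
ceph-volume against devices, not by the generic scheduler, so own_dds
stays empty for service_type 'osd'. A usage sketch under that assumption
(dg being a DriveGroupSpec as in the new test below):

    # Usage sketch (assumption): inside with_service an OSD spec yields
    # no daemons until a helper such as with_osd_daemon deploys one.
    with with_service(cephadm_module, dg, host='test') as daemon_names:
        assert daemon_names == []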
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 0cefe8c9bf84086188e461a89ca97f8e2db68442..4d40b161c2c9053c931dac897f74edebc6633b6a 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -1735,6 +1735,15 @@ Traceback (most recent call last):
             with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
                 pass
 
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+    def test_osd_count(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        dg = DriveGroupSpec(service_id='', data_devices=DeviceSelection(all=True))
+        with with_host(cephadm_module, 'test', refresh_hosts=False):
+            with with_service(cephadm_module, dg, host='test'):
+                with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1):
+                    assert wait(cephadm_module, cephadm_module.describe_service())[0].size == 1
+
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
     def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
         with pytest.raises(OrchestratorError):