git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
py-common/deployment/translate: drop unneeded ctor arg
authorJan Fajerski <jfajerski@suse.com>
Fri, 26 Jun 2020 13:18:18 +0000 (15:18 +0200)
committerSebastian Wagner <sebastian.wagner@suse.com>
Tue, 14 Jul 2020 09:39:06 +0000 (11:39 +0200)
The DriveGroupSpec is already part of the DriveSelection.

Fixes: https://tracker.ceph.com/issues/46231
Signed-off-by: Jan Fajerski <jfajerski@suse.com>
(cherry picked from commit 1a485406da49977a1fc83dda8204b9b94634bec1)

src/pybind/mgr/cephadm/services/osd.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/python-common/ceph/deployment/translate.py
src/python-common/ceph/tests/test_drive_group.py

index 470c0cfbf13f317c676b42a815e2e403c7e50f87..10efea6c1598890edd3a25e92af8b3d02307d37e 100644 (file)
@@ -23,12 +23,12 @@ class OSDService(CephadmService):
     def create(self, drive_group: DriveGroupSpec) -> str:
         logger.debug(f"Processing DriveGroup {drive_group}")
         ret = []
-        drive_group.osd_id_claims = self.find_destroyed_osds()
-        logger.info(f"Found osd claims for drivegroup {drive_group.service_id} -> {drive_group.osd_id_claims}")
+        osd_id_claims = self.find_destroyed_osds()
+        logger.info(f"Found osd claims for drivegroup {drive_group.service_id} -> {osd_id_claims}")
         for host, drive_selection in self.prepare_drivegroup(drive_group):
             logger.info('Applying %s on host %s...' % (drive_group.service_id, host))
-            cmd = self.driveselection_to_ceph_volume(drive_group, drive_selection,
-                                                     drive_group.osd_id_claims.get(host, []))
+            cmd = self.driveselection_to_ceph_volume(drive_selection,
+                                                     osd_id_claims.get(host, []))
             if not cmd:
                 logger.debug("No data_devices, skipping DriveGroup: {}".format(drive_group.service_id))
                 continue
@@ -36,11 +36,11 @@ class OSDService(CephadmService):
             # disable this until https://github.com/ceph/ceph/pull/34835 is merged
             env_vars: List[str] = []
             ret_msg = self.create_single_host(
-                host, cmd, replace_osd_ids=drive_group.osd_id_claims.get(host, []), env_vars=env_vars
+                host, cmd, replace_osd_ids=osd_id_claims.get(host, []), env_vars=env_vars
             )
             ret.append(ret_msg)
         return ", ".join(ret)
-        
+
     def create_single_host(self, host: str, cmd: str, replace_osd_ids=None, env_vars: Optional[List[str]] = None) -> str:
         out, err, code = self._run_ceph_volume_command(host, cmd, env_vars=env_vars)
 
@@ -122,12 +122,12 @@ class OSDService(CephadmService):
             host_ds_map.append((host, drive_selection))
         return host_ds_map
 
-    def driveselection_to_ceph_volume(self, drive_group: DriveGroupSpec,
+    def driveselection_to_ceph_volume(self,
                                       drive_selection: DriveSelection,
                                       osd_id_claims: Optional[List[str]] = None,
                                       preview: bool = False) -> Optional[str]:
-        logger.debug(f"Translating DriveGroup <{drive_group}> to ceph-volume command")
-        cmd: Optional[str] = translate.to_ceph_volume(drive_group, drive_selection,
+        logger.debug(f"Translating DriveGroup <{drive_selection.spec}> to ceph-volume command")
+        cmd: Optional[str] = translate.to_ceph_volume(drive_selection,
                                                       osd_id_claims, preview=preview).run()
         logger.debug(f"Resulting ceph-volume cmd: {cmd}")
         return cmd
@@ -163,7 +163,7 @@ class OSDService(CephadmService):
         for osdspec in osdspecs:
 
             # populate osd_id_claims
-            osdspec.osd_id_claims = self.find_destroyed_osds()
+            osd_id_claims = self.find_destroyed_osds()
 
             # prepare driveselection
             for host, ds in self.prepare_drivegroup(osdspec):
@@ -171,9 +171,8 @@ class OSDService(CephadmService):
                     continue
 
                 # driveselection for host
-                cmd = self.driveselection_to_ceph_volume(osdspec,
-                                                         ds,
-                                                         osdspec.osd_id_claims.get(host, []),
+                cmd = self.driveselection_to_ceph_volume(ds,
+                                                         osd_id_claims.get(host, []),
                                                          preview=True)
                 if not cmd:
                     logger.debug("No data_devices, skipping DriveGroup: {}".format(
index 18ff7a725fc63d42f62d76c31b0f3183bb436414..5f9f7bd0aa7e724a46657b04e9a593611ac1b85c 100644 (file)
@@ -332,7 +332,7 @@ class TestCephadm(object):
             dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
             ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
             preview = preview
-            out = cephadm_module.osd_service.driveselection_to_ceph_volume(dg, ds, [], preview)
+            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
             assert out in exp_command
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
index 2412e86186734a4d8b84f59edc815449e5f2ea49..7eb0ec1d5059ce470d75f054f900c381e0bd113b 100644 (file)
@@ -5,7 +5,6 @@ try:
 except ImportError:
     pass
 
-from ceph.deployment.drive_group import DriveGroupSpec
 from ceph.deployment.drive_selection.selector import DriveSelection
 
 logger = logging.getLogger(__name__)
@@ -14,14 +13,13 @@ logger = logging.getLogger(__name__)
 class to_ceph_volume(object):
 
     def __init__(self,
-                 spec,  # type: DriveGroupSpec
                  selection,  # type: DriveSelection
                  osd_id_claims=None,  # type: Optional[List[str]]
                  preview=False  # type: bool
                  ):
 
-        self.spec = spec
         self.selection = selection
+        self.spec = selection.spec
         self.preview = preview
         self.osd_id_claims = osd_id_claims
 
index d98152bc374fb328cb08749ef33ca2fec00f7a36..ba3cd6ec949f95f719d49aaa767fb4d9c6509586 100644 (file)
@@ -73,7 +73,7 @@ def test_ceph_volume_command_0():
                           )
     inventory = _mk_inventory(_mk_device()*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'
 
 
@@ -84,7 +84,7 @@ def test_ceph_volume_command_1():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                    '--db-devices /dev/sdc /dev/sdd --yes --no-systemd')
 
@@ -100,7 +100,7 @@ def test_ceph_volume_command_2():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                    '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
                    '--yes --no-systemd')
@@ -118,7 +118,7 @@ def test_ceph_volume_command_3():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                    '--db-devices /dev/sdc /dev/sdd '
                    '--wal-devices /dev/sde /dev/sdf --dmcrypt '
@@ -140,7 +140,7 @@ def test_ceph_volume_command_4():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                    '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
                    '--block-wal-size 500M --block-db-size 500M --dmcrypt '
@@ -154,7 +154,7 @@ def test_ceph_volume_command_5():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'
 
 
@@ -167,7 +167,7 @@ def test_ceph_volume_command_6():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, []).run()
+    cmd = translate.to_ceph_volume(sel, []).run()
     assert cmd == ('lvm batch --no-auto /dev/sdc /dev/sdd '
                    '--journal-size 500M --journal-devices /dev/sda /dev/sdb '
                    '--filestore --yes --no-systemd')
@@ -180,5 +180,5 @@ def test_ceph_volume_command_7():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.to_ceph_volume(spec, sel, ['0', '1']).run()
+    cmd = translate.to_ceph_volume(sel, ['0', '1']).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --osd-ids 0 1 --yes --no-systemd'