mgr/orchestrator: Document OSD replacement (29792/head)
author    Sebastian Wagner <sebastian.wagner@suse.com>
          Wed, 21 Aug 2019 12:22:24 +0000 (14:22 +0200)
committer Sebastian Wagner <sebastian.wagner@suse.com>
          Wed, 21 Aug 2019 13:30:01 +0000 (15:30 +0200)
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
doc/mgr/orchestrator_modules.rst
src/pybind/mgr/ansible/module.py
src/pybind/mgr/orchestrator.py
src/pybind/mgr/test_orchestrator/module.py
src/python-common/ceph/deployment/drive_group.py

index fb775744072a930d8065d90cf6a45ef1b6d23b2a..b3c7c1b23717065aec60f4e7bdb4bb95ccced55a 100644 (file)
@@ -247,7 +247,6 @@ OSD management
 --------------
 
 .. automethod:: Orchestrator.create_osds
-.. automethod:: Orchestrator.replace_osds
 .. automethod:: Orchestrator.remove_osds
 
 .. py:currentmodule:: ceph.deployment.drive_group
@@ -261,6 +260,28 @@ OSD management
 
 .. py:currentmodule:: orchestrator
 
+.. _orchestrator-osd-replace:
+
+OSD Replacement
+^^^^^^^^^^^^^^^
+
+See :ref:`rados-replacing-an-osd` for the underlying process.
+
+Replacing OSDs is fundamentally a two-stage process, as users need to
+physically replace drives. The orchestrator therefore exposes the two
+stages separately.
+
+Phase one is a call to :meth:`Orchestrator.remove_osds` with ``destroy=True`` in order to mark
+the OSD as destroyed.
+
+Phase two is a call to :meth:`Orchestrator.create_osds` with a Drive Group that has
+
+.. py:currentmodule:: ceph.deployment.drive_group
+
+:attr:`DriveGroupSpec.osd_id_claims` set to the destroyed OSD IDs.
+
+.. py:currentmodule:: orchestrator
+
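+For example, replacing OSD ``0`` on host ``node1`` might look like the
+following (a minimal sketch: ``orch`` stands for a concrete orchestrator
+instance, the host name and device path are illustrative, and completion
+handling is omitted):
+
+.. code-block:: python
+
+    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
+
+    # Phase one: mark the OSD destroyed, keeping its ID and CRUSH position.
+    orch.remove_osds(['0'], destroy=True)
+
+    # ... the operator physically swaps the failed drive ...
+
+    # Phase two: recreate the OSD on the new drive, claiming the old ID.
+    drive_group = DriveGroupSpec(
+        host_pattern='node1',
+        data_devices=DeviceSelection(paths=['/dev/sda']),
+        osd_id_claims={'0': DeviceSelection(paths=['/dev/sda'])},
+    )
+    orch.create_osds(drive_group)
+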
 Stateless Services
 ------------------
 
index cb60dc7f818ace51bfe9c4f7b8cd1bca66f3663b..8103f65ad6c7165173105ff3ba7a355c729ead3b 100644 (file)
@@ -586,11 +586,13 @@ class Module(MgrModule, orchestrator.Orchestrator):
 
         return playbook_operation
 
-    def remove_osds(self, osd_ids):
+    def remove_osds(self, osd_ids, destroy=False):
         """Remove osd's.
 
         :param osd_ids: List of osd's to be removed (List[int])
+        :param destroy: unsupported.
         """
+        assert not destroy
 
         extravars = {'osd_to_kill': ",".join([str(osd_id) for osd_id in osd_ids]),
                      'ireallymeanit':'yes'}
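
The ansible backend guards with a bare ``assert``, which disappears under
``python -O``; a backend that cannot honor ``destroy`` could instead fail
loudly (an alternative sketch, not part of this commit):

    def remove_osds(self, osd_ids, destroy=False):
        if destroy:
            # No ansible playbook implements 'osd destroy' here.
            raise NotImplementedError("destroy=True is not supported")
        ...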
index 7a578b6e8630717d8c780424cba44178c6ff52fe..236949d75d3cb55774c738cdd933821ee25166c9 100644 (file)
@@ -432,18 +432,11 @@ class Orchestrator(object):
         """
         raise NotImplementedError()
 
-    def replace_osds(self, drive_group):
-        # type: (DriveGroupSpec) -> WriteCompletion
-        """
-        Like create_osds, but the osd_id_claims must be fully
-        populated.
-        """
-        raise NotImplementedError()
-
-    def remove_osds(self, osd_ids):
-        # type: (List[str]) -> WriteCompletion
+    def remove_osds(self, osd_ids, destroy=False):
+        # type: (List[str], bool) -> WriteCompletion
         """
         :param osd_ids: list of OSD IDs
+        :param destroy: marks the OSDs as destroyed. See :ref:`orchestrator-osd-replace`.
 
         Note that this can only remove OSDs that were successfully
         created (i.e. got an OSD ID).
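
A concrete module implementing the updated contract might map ``destroy``
onto the ``osd destroy`` and ``osd purge`` mon commands (a sketch under
assumptions: ``_run_mon_command`` and ``_completion`` are hypothetical
helpers, not part of this interface):

    def remove_osds(self, osd_ids, destroy=False):
        # type: (List[str], bool) -> WriteCompletion
        # 'osd destroy' keeps the OSD ID and CRUSH position claimable by a
        # replacement OSD; 'osd purge' removes the OSD entirely.
        prefix = 'osd destroy' if destroy else 'osd purge'
        for osd_id in osd_ids:
            self._run_mon_command({  # hypothetical helper
                'prefix': prefix,
                'id': int(osd_id),
                'yes_i_really_mean_it': True,
            })
        return self._completion()  # hypothetical helper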
index d63139219abed8f4231be7a963681e865d155f88..22dc9b96ed87527f08b342dd0d2baceb35c380d6 100644 (file)
@@ -207,7 +207,7 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
         drive_group.validate(all_hosts)
 
     @deferred_write("remove_osds")
-    def remove_osds(self, osd_ids):
+    def remove_osds(self, osd_ids, destroy=False):
         assert isinstance(osd_ids, list)
 
     @deferred_write("service_action")
index b77add828448e77c1a28b956263e2fb9aee3ba56..ff6719dea9f6c89ce1af65af82d3dae11d84a9ea 100644 (file)
@@ -1,6 +1,6 @@
 import fnmatch
 try:
-    from typing import Optional, List
+    from typing import Optional, List, Dict
 except ImportError:
     pass
 
@@ -63,10 +63,21 @@ class DriveGroupSpec(object):
     Describe a drive group in the same form that ceph-volume
     understands.
     """
-    def __init__(self, host_pattern, data_devices=None, db_devices=None, wal_devices=None, journal_devices=None,
-                 data_directories=None, osds_per_device=None, objectstore='bluestore', encrypted=False,
-                 db_slots=None, wal_slots=None):
-        # type: (str, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], int, str, bool, int, int) -> None
+
+    def __init__(self,
+                 host_pattern,  # type: str
+                 data_devices=None,  # type: Optional[DeviceSelection]
+                 db_devices=None,  # type: Optional[DeviceSelection]
+                 wal_devices=None,  # type: Optional[DeviceSelection]
+                 journal_devices=None,  # type: Optional[DeviceSelection]
+                 data_directories=None,  # type: Optional[List[str]]
+                 osds_per_device=None,  # type: Optional[int]
+                 objectstore='bluestore',  # type: str
+                 encrypted=False,  # type: bool
+                 db_slots=None,  # type: Optional[int]
+                 wal_slots=None,  # type: Optional[int]
+                 osd_id_claims=None,  # type: Optional[Dict[str, DeviceSelection]]
+                 ):
+        # type: (...) -> None
 
         # concept of applying a drive group to a (set) of hosts is tightly
         # linked to the drive group itself
@@ -105,11 +116,10 @@ class DriveGroupSpec(object):
         #: How many OSDs per WAL device
         self.wal_slots = wal_slots
 
-        # FIXME: needs ceph-volume support
-        #: Optional: mapping of drive to OSD ID, used when the
+        #: Optional: mapping of OSD ID to DeviceSelection, used when the
         #: created OSDs are meant to replace previous OSDs on
-        #: the same node.
-        self.osd_id_claims = {}
+        #: the same node. See :ref:`orchestrator-osd-replace`.
+        self.osd_id_claims = osd_id_claims
 
     @classmethod
     def from_json(self, json_drive_group):
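
With ``osd_id_claims`` promoted to a constructor argument, a replacement
spec can now be built directly (a sketch mirroring the documentation
example above; the OSD ID and device path are illustrative):

    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection

    spec = DriveGroupSpec(
        host_pattern='node1',
        data_devices=DeviceSelection(paths=['/dev/sda']),
        # Map the destroyed OSD's ID to the device that replaces it.
        osd_id_claims={'0': DeviceSelection(paths=['/dev/sda'])},
    )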