--------------
.. automethod:: Orchestrator.create_osds
-.. automethod:: Orchestrator.replace_osds
.. automethod:: Orchestrator.remove_osds
.. py:currentmodule:: ceph.deployment.drive_group
.. py:currentmodule:: orchestrator
+.. _orchestrator-osd-replace:
+
+OSD Replacement
+^^^^^^^^^^^^^^^
+
+See :ref:`rados-replacing-an-osd` for the underlying process.
+
+Replacing OSDs is fundamentally a two-staged process, as users need to
+physically replace drives between the two stages. The orchestrator therefore
+exposes both stages explicitly.
+
+Phase one is a call to :meth:`Orchestrator.remove_osds` with ``destroy=True`` in order to mark
+the OSD as destroyed.
+
+
+Phase two is a call to :meth:`Orchestrator.create_osds` with a Drive Group whose
+
+.. py:currentmodule:: ceph.deployment.drive_group
+
+:attr:`DriveGroupSpec.osd_id_claims` is set to the destroyed OSD ids.
+
+.. py:currentmodule:: orchestrator
+
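+A minimal sketch of the two phases, assuming ``orch`` is a connected
+:class:`Orchestrator` instance; the host name, device path, OSD id and the
+``DeviceSelection`` usage are illustrative, and completion handling is
+omitted::
+
+    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
+
+    # Phase one: mark OSD 2 as destroyed so its id can be reused.
+    orch.remove_osds(['2'], destroy=True)
+
+    # ... physically swap the failed drive ...
+
+    # Phase two: re-create the OSD, claiming the destroyed id for the new drive.
+    dg = DriveGroupSpec(
+        host_pattern='node1',
+        data_devices=DeviceSelection(paths=['/dev/sdb']),
+        osd_id_claims={'2': DeviceSelection(paths=['/dev/sdb'])},
+    )
+    orch.create_osds(dg)
+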
Stateless Services
------------------
"""
raise NotImplementedError()
- def replace_osds(self, drive_group):
- # type: (DriveGroupSpec) -> WriteCompletion
- """
- Like create_osds, but the osd_id_claims must be fully
- populated.
- """
- raise NotImplementedError()
-
- def remove_osds(self, osd_ids):
- # type: (List[str]) -> WriteCompletion
+ def remove_osds(self, osd_ids, destroy=False):
+ # type: (List[str], bool) -> WriteCompletion
"""
:param osd_ids: list of OSD IDs
+        :param destroy: marks the OSDs as destroyed, so that their IDs can be
+            reused by the replacement OSDs. See :ref:`orchestrator-osd-replace`.
Note that this can only remove OSDs that were successfully
created (i.e. got an OSD ID).
import fnmatch
try:
- from typing import Optional, List
+ from typing import Optional, List, Dict
except ImportError:
pass
Describe a drive group in the same form that ceph-volume
understands.
"""
- def __init__(self, host_pattern, data_devices=None, db_devices=None, wal_devices=None, journal_devices=None,
- data_directories=None, osds_per_device=None, objectstore='bluestore', encrypted=False,
- db_slots=None, wal_slots=None):
- # type: (str, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], int, str, bool, int, int) -> None
+
+ def __init__(self,
+ host_pattern, # type: str
+ data_devices=None, # type: Optional[DeviceSelection]
+ db_devices=None, # type: Optional[DeviceSelection]
+ wal_devices=None, # type: Optional[DeviceSelection]
+ journal_devices=None, # type: Optional[DeviceSelection]
+ data_directories=None, # type: Optional[List[str]]
+ osds_per_device=None, # type: Optional[int]
+ objectstore='bluestore', # type: str
+ encrypted=False, # type: bool
+ db_slots=None, # type: Optional[int]
+ wal_slots=None, # type: Optional[int]
+ osd_id_claims=None, # type: Optional[Dict[str, DeviceSelection]]
+ ):
# concept of applying a drive group to a (set) of hosts is tightly
# linked to the drive group itself
#: How many OSDs per WAL device
self.wal_slots = wal_slots
- # FIXME: needs ceph-volume support
- #: Optional: mapping of drive to OSD ID, used when the
+ #: Optional: mapping of OSD id to DeviceSelection, used when the
#: created OSDs are meant to replace previous OSDs on
- #: the same node.
- self.osd_id_claims = {}
+ #: the same node. See :ref:`orchestrator-osd-replace`
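+        #: (illustrative example: ``{'1': DeviceSelection(paths=['/dev/sdb'])}``)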
+ self.osd_id_claims = osd_id_claims
@classmethod
def from_json(self, json_drive_group):