create loopback devices capable of holding OSDs.
.. note:: Each osd will require 5GiB of space.
-After bootstraping the cluster you can go inside the seed box in which you'll be
+After bootstrapping the cluster you can go inside the seed box in which you'll be
able to run Cephadm commands::
./box.py -v cluster bash
- ceph orch ps
- ceph orch ls
- ceph versions
- # to make sure mgr daemons upgrade is fully completed, including being deployed by a mgr on new new version
+ # to make sure mgr daemons upgrade is fully completed, including being deployed by a mgr on a new version
# also serves as an early failure if manually upgrading the mgrs failed as --daemon-types won't be recognized
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
- while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
# verify only one version found for mgrs and that their version hash matches what we are upgrading to
- ceph versions | jq -e '.mgr | length == 1'
- ceph versions | jq -e '.mgr | keys' | grep $sha1
- # verify overall we still se two versions, basically to make sure --daemon-types wans't ignored and all daemons upgraded
+ # verify overall we still see two versions, basically to make sure --daemon-types wasn't ignored and all daemons upgraded
- ceph versions | jq -e '.overall | length == 2'
# check that exactly two daemons have been upgraded to the new image (our 2 mgr daemons)
- ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
$SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME
$SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev
-# osd boostrap keyring
+# osd bootstrap keyring
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd
@staticmethod
def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]:
- """Return the version of the notifer from it's http endpoint"""
+ """Return the version of the notifier from it's http endpoint"""
path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta')
try:
with open(path, 'r') as env:
# type: () -> CephContainer
# daemon_id, is used to generated the cid and pid files used by podman but as both tcmu-runner
# and rbd-target-api have the same daemon_id, it conflits and prevent the second container from
- # starting. .tcmu runner is appened to the daemon_id to fix that.
+ # starting. '.tcmu' is appended to the daemon_id to fix that.
tcmu_container = get_container(self.ctx, self.fsid, self.daemon_type, str(self.daemon_id) + '.tcmu')
tcmu_container.entrypoint = '/usr/bin/tcmu-runner'
tcmu_container.cname = self.get_container_name(desc='tcmu')
def release(self, force: bool = False) -> None:
"""
Releases the file lock.
- Please note, that the lock is only completly released, if the lock
+ Please note that the lock is only completely released if the lock
counter is 0.
Also note, that the lock file itself is not automatically deleted.
:arg bool force:
def infer_config(func: FuncT) -> FuncT:
"""
- Infer the clusater configuration using the followign priority order:
+ Infer the cluster configuration using the following priority order:
1- if the user has provided custom conf file (-c option) use it
2- otherwise if daemon --name has been provided use daemon conf
3- otherwise find the mon daemon conf file and use it (if v1)
if 'fsid' in ctx and ctx.fsid:
name = ctx.name if ('name' in ctx and ctx.name) else get_mon_daemon_name(ctx.fsid)
if name is not None:
- # daemon name has been specified (or inffered from mon), let's use its conf
+ # daemon name has been specified (or inferred from mon), let's use its conf
ctx.config = config_path(name.split('.', 1)[0], name.split('.', 1)[1])
else:
# no daemon, in case the cluster has a config dir then use it
addrv_args = []
addr_arg = addrv_arg
if addr_arg[0] != '[' or addr_arg[-1] != ']':
- raise Error(f'--mon-addrv value {addr_arg} must use square backets')
+ raise Error(f'--mon-addrv value {addr_arg} must use square brackets')
for addr in addr_arg[1: -1].split(','):
hasport = r.findall(addr)
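The check above expects the --mon-addrv value to be passed as a bracketed address vector; an accepted value (with placeholder addresses) looks like ``--mon-addrv '[v2:10.1.2.3:3300,v1:10.1.2.3:6789]'`` -- only the enclosing square brackets and the comma-separated entries matter to this validation.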
cluster_network = cp.get('global', 'cluster_network').strip('"').strip("'")
if cluster_network:
- cluser_nets = set([x.strip() for x in cluster_network.split(',')])
+ cluster_nets = set([x.strip() for x in cluster_network.split(',')])
local_subnets = set([x[0] for x in list_networks(ctx).items()])
- for net in cluser_nets:
+ for net in cluster_nets:
if net not in local_subnets:
logger.warning(f'The cluster CIDR network {net} is not configured locally.')
def kubic_repo_path(self) -> str:
return '/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list'
- def kubric_repo_gpgkey_url(self) -> str:
+ def kubic_repo_gpgkey_url(self) -> str:
return '%s/Release.key' % self.kubic_repo_url()
- def kubric_repo_gpgkey_path(self) -> str:
+ def kubic_repo_gpgkey_path(self) -> str:
return '/etc/apt/trusted.gpg.d/kubic.release.gpg'
def add_kubic_repo(self) -> None:
- url = self.kubric_repo_gpgkey_url()
+ url = self.kubic_repo_gpgkey_url()
logger.info('Installing repo GPG key from %s...' % url)
try:
response = urlopen(url)
raise Error('failed to fetch GPG key')
key = response.read().decode('utf-8')
tmp_key = write_tmp(key, 0, 0)
- keyring = self.kubric_repo_gpgkey_path()
+ keyring = self.kubic_repo_gpgkey_path()
call_throws(self.ctx, ['apt-key', '--keyring', keyring, 'add', tmp_key.name])
logger.info('Installing repo file at %s...' % self.kubic_repo_path())
f.write(content)
def rm_kubic_repo(self) -> None:
- keyring = self.kubric_repo_gpgkey_path()
+ keyring = self.kubic_repo_gpgkey_path()
if os.path.exists(keyring):
logger.info('Removing repo GPG key %s...' % keyring)
os.unlink(keyring)
except Exception:
raise Error('version must be in the form x.y.z (e.g., 15.2.0)')
if ctx.release:
- # Pacific =/= pacific in this case, set to undercase to avoid confision
+ # Pacific =/= pacific in this case, set to lowercase to avoid confusion
ctx.release = ctx.release.lower()
pkg = create_packager(ctx, stable=ctx.release,
Enclosures are detected by walking the scsi generic sysfs hierarchy.
Any device tree that holds an 'enclosure' subdirectory is interpreted as
- an enclosure. Once identified the enclosire directory is analysis to
+ an enclosure. Once identified, the enclosure directory is analyzed to
identify key descriptors that will help relate disks to enclosures and
disks to enclosure slots.
if os.path.exists(enc_path):
enc_dirs = glob(os.path.join(enc_path, '*'))
if len(enc_dirs) != 1:
- # incomplete enclosure spec - expecting ONE dir in the fomrat
+ # incomplete enclosure spec - expecting ONE dir in the format
# host(adapter):bus:target:lun e.g. 16:0:0:0
continue
enc_path = enc_dirs[0]
def command_gather_facts(ctx: CephadmContext) -> None:
- """gather_facts is intended to provide host releated metadata to the caller"""
+ """gather_facts is intended to provide host related metadata to the caller"""
host = HostFacts(ctx)
print(host.dump())
parser_adopt.add_argument(
'--force-start',
action='store_true',
- help='start newly adoped daemon, even if it was not running previously')
+ help='start newly adopted daemon, even if it was not running previously')
parser_adopt.add_argument(
'--container-init',
action='store_true',
'--extra-container-args',
action='append',
default=[],
- help='Additional container arguments to apply to deamon'
+ help='Additional container arguments to apply to daemon'
)
parser_deploy.add_argument(
'--extra-entrypoint-args',
-e KEEPALIVED_UNICAST_SRC_IP=10.10.0.21 \
-e KEEPALIVED_UNICAST_PEER_0=10.10.0.22 \
-e KEEPALIVED_TRACK_INTERFACE_1=eth0 \
--e KEEPALVED_TRACK_INTERFACE_2=eth1 \
+-e KEEPALIVED_TRACK_INTERFACE_2=eth1 \
-e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \
-e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \
quay.io/ceph/keepalived
-e KEEPALIVED_UNICAST_SRC_IP=10.10.0.22 \
-e KEEPALIVED_UNICAST_PEER_0=10.10.0.21 \
-e KEEPALIVED_TRACK_INTERFACE_1=eth0 \
--e KEEPALVED_TRACK_INTERFACE_2=eth1 \
+-e KEEPALIVED_TRACK_INTERFACE_2=eth1 \
-e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \
-e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \
quay.io/ceph/keepalived
(
'192.168.1.1',
{'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
- r'must use square backets',
+ r'must use square brackets',
),
(
'[192.168.1.1]',
# vi: set ft=ruby :
#
# In order to reduce the need of recreating all vagrant boxes everytime they
-# get dirty, snaptshot them and revert the snapshot of them instead.
+# get dirty, snapshot them and revert the snapshot of them instead.
# Two helpful scripts to do this easily can be found here:
# https://github.com/Devp00l/vagrant-helper-scripts
"os_subscription",
self._check_subscription),
CephadmCheckDefinition(mgr, "CEPHADM_CHECK_PUBLIC_MEMBERSHIP",
- "check that all hosts have a NIC on the Ceph public_netork",
+ "check that all hosts have a NIC on the Ceph public_network",
"public_network",
self._check_public_network),
CephadmCheckDefinition(mgr, "CEPHADM_CHECK_MTU",
self.active_checks = []
self.skipped_checks = []
- # process all healthchecks that are not explcitly disabled
+ # process all healthchecks that are not explicitly disabled
for health_check in self.health_checks:
if check_config.get(health_check.name, '') != 'disabled':
self.active_checks.append(health_check.name)
type='int',
default=None,
desc='internal - do not modify',
- # used to track track spec and other data migrations.
+ # used to track spec and other data migrations.
),
Option(
'config_dashboard',
self.migration = Migrations(self)
- _service_clses: Sequence[Type[CephadmService]] = [
+ _service_classes: Sequence[Type[CephadmService]] = [
OSDService, NFSService, MonService, MgrService, MdsService,
RgwService, RbdMirrorService, GrafanaService, AlertmanagerService,
PrometheusService, NodeExporterService, LokiService, PromtailService, CrashService, IscsiService,
# https://github.com/python/mypy/issues/8993
self.cephadm_services: Dict[str, CephadmService] = {
- cls.TYPE: cls(self) for cls in _service_clses} # type: ignore
+ cls.TYPE: cls(self) for cls in _service_classes} # type: ignore
self.mgr_service: MgrService = cast(MgrService, self.cephadm_services['mgr'])
self.osd_service: OSDService = cast(OSDService, self.cephadm_services['osd'])
def exit_host_maintenance(self, hostname: str) -> str:
"""Exit maintenance mode and return a host to an operational state
- Returning from maintnenance will enable the clusters systemd target and
+ Returning from maintenance will enable the cluster's systemd target and
start it, and remove any noout that has been added for the host if the
host has osd daemons
"""
Deprecated. Please use `apply()` instead.
- Keeping this around to be compapatible to mgr/dashboard
+ Keeping this around to be compatible with mgr/dashboard
"""
return [self._apply(spec) for spec in specs]
# TODO: At some point we want to deploy daemons that are on offline hosts
# at what point we do this differs per daemon type. Stateless daemons we could
- # do quickly to improve availability. Steful daemons we might want to wait longer
+ # do quickly to improve availability. Stateful daemons we might want to wait longer
# to see if the host comes back online
existing = existing_active + existing_standby
out, err, code = await self.mgr.ssh._execute_command(
host, cmd, stdin=stdin, addr=addr)
# if there is an agent on this host, make sure it is using the most recent
- # vesion of cephadm binary
+ # version of cephadm binary
if host in self.mgr.inventory:
for agent in self.mgr.cache.get_daemons_by_type('agent', host):
self.mgr._schedule_daemon_action(agent.name(), 'redeploy')
raise NotImplementedError()
def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
- # if this is called for a service type where it hasn't explcitly been
+ # if this is called for a service type where it hasn't explicitly been
# defined, return empty Daemon Desc
return DaemonDescription()
def _ready_to_drain_osds(self) -> List["OSD"]:
"""
Returns OSDs that are ok to stop and not yet draining. Only returns as many OSDs as can
- be accomodated by the 'max_osd_draining_count' config value, considering the number of OSDs
+ be accommodated by the 'max_osd_draining_count' config value, considering the number of OSDs
that are already draining.
"""
draining_limit = max(1, self.mgr.max_osd_draining_count)
CephadmServe(cephadm_module)._apply_all_services()
assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2
- # get a deamons from postaction list (ARRGH sets!!)
+ # get daemons from the postaction list (ARRGH sets!!)
tempset = cephadm_module.requires_post_actions.copy()
- tempdeamon1 = tempset.pop()
- tempdeamon2 = tempset.pop()
+ tempdaemon1 = tempset.pop()
+ tempdaemon2 = tempset.pop()
# make sure post actions has 2 daemons in it
assert len(cephadm_module.requires_post_actions) == 2
# replicate a host cache that is not in sync when check_daemons is called
- tempdd1 = cephadm_module.cache.get_daemon(tempdeamon1)
- tempdd2 = cephadm_module.cache.get_daemon(tempdeamon2)
+ tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1)
+ tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2)
host = 'test1'
- if 'test1' not in tempdeamon1:
+ if 'test1' not in tempdaemon1:
host = 'test2'
- cephadm_module.cache.rm_daemon(host, tempdeamon1)
+ cephadm_module.cache.rm_daemon(host, tempdaemon1)
# Make sure, _check_daemons does a redeploy due to monmap change:
cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
CephadmServe(cephadm_module)._check_daemons()
_cfg_db.assert_called_once_with([tempdd2])
- # post actions still has the other deamon in it and will run next _check_deamons
+ # post actions still has the other daemon in it and will run on the next _check_daemons
assert len(cephadm_module.requires_post_actions) == 1
# post actions was missed for a daemon
- assert tempdeamon1 in cephadm_module.requires_post_actions
+ assert tempdaemon1 in cephadm_module.requires_post_actions
# put the daemon back in the cache
cephadm_module.cache.add_daemon(host, tempdd1)
c = cephadm_module.create_osds(dg)
out = wait(cephadm_module, c)
assert out == "Created no osd(s) on host test; already created?"
- bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_hsot'),
+ bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'),
data_devices=DeviceSelection(paths=['']))
c = cephadm_module.create_osds(bad_dg)
out = wait(cephadm_module, c)
# * where e=[], *=any
#
# + list of known hosts available for scheduling (host_key)
-# | + hosts used for explict placement (explicit_key)
+# | + hosts used for explicit placement (explicit_key)
# | | + count
# | | | + existing daemons
# | | | | + section (host, label, pattern)
'rgw:host1(*:81)', 'rgw:host2(*:81)', 'rgw:host3(*:81)'],
[]
),
- # label + count_per_host + ports (+ xisting)
+ # label + count_per_host + ports (+ existing)
NodeAssignmentTest(
'rgw',
PlacementSpec(count=6, label='foo'),
def forall_hosts_wrapper(*args: Any) -> List[T]:
from cephadm.module import CephadmOrchestrator
- # Some weired logic to make calling functions with multiple arguments work.
+ # Some weird logic to make calling functions with multiple arguments work.
if len(args) == 1:
vals = args[0]
self = None
/**
- * To use a permenant config copy this file to "vagrant.config.json",
- * edit it and remove this comment beacuase comments are not allowed
+ * To use a permanent config, copy this file to "vagrant.config.json",
+ * edit it and remove this comment because comments are not allowed
* in a valid JSON file.
*/
Typical use:
- filter by host when presentig UI workflow for configuring
+ filter by host when presenting UI workflow for configuring
a particular server.
filter by label when not all of estate is Ceph servers,
and we want to only learn about the Ceph servers.
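To make the two typical uses concrete, here is a minimal sketch; it assumes the filter exposes ``hosts`` and ``labels`` keyword arguments (check the orchestrator interface for the exact signature)::

    from orchestrator import InventoryFilter

    # UI workflow for configuring one particular server
    per_host = InventoryFilter(hosts=['node1'])

    # only learn about the hosts labelled as Ceph servers
    ceph_only = InventoryFilter(labels=['ceph'])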
>>> import mgr_module
>>> #doctest: +SKIP
- ... class MyImplentation(mgr_module.MgrModule, Orchestrator):
+ ... class MyImplementation(mgr_module.MgrModule, Orchestrator):
... def __init__(self, ...):
... self.orch_client = OrchestratorClientMixin()
... self.orch_client.set_mgr(self.mgr))
def set_mgr(self, mgr: MgrModule) -> None:
"""
- Useable in the Dashbord that uses a global ``mgr``
+ Usable in the Dashboard that uses a global ``mgr``
"""
self.__mgr = mgr # Make sure we're not overwriting any other `mgr` properties
self.vendor = vendor
#: Size specification of format LOW:HIGH.
- #: Can also take the the form :HIGH, LOW:
+ #: Can also take the form :HIGH, LOW:
#: or an exact value (as ceph-volume inventory reports)
self.size: Optional[str] = size
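A minimal sketch of the size formats listed above, assuming the standard ``DriveGroupSpec``/``DeviceSelection`` constructors (names and values are illustrative)::

    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
    from ceph.deployment.service_spec import PlacementSpec

    # select data devices whose size falls between 10G and 40G
    dg = DriveGroupSpec(
        placement=PlacementSpec(host_pattern='*'),
        data_devices=DeviceSelection(size='10G:40G'),
    )
    # other accepted forms: ':40G' (upper bound only), '10G:' (lower bound
    # only), or an exact value such as '20G'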
# type: (str, str) -> None
# The 'key' value is overwritten here because
- # the user_defined attribute does not neccessarily
+ # the user_defined attribute does not necessarily
# correspond to the desired attribute
# requested from the inventory output
Matcher.__init__(self, key, value)
if not disk:
return False
disk_value = self._get_disk_key(disk)
- # This doesn't neccessarily have to be a float.
+ # This doesn't necessarily have to be a float.
# The current output from ceph-volume gives a float..
# This may change in the future..
# todo: harden this paragraph
if disk_size_in_byte <= self.to_byte(
self.high) and disk_size_in_byte >= self.to_byte(self.low):
return True
- # is a else: return False neccessary here?
+ # is an else: return False necessary here?
# (and in all other branches)
logger.debug("Disk didn't match for 'high/low' filter")
understanding of what fields are special for a give service type.
Note, we'll need to stay compatible with both versions for the
- the next two major releases (octoups, pacific).
+ the next two major releases (octopus, pacific).
:param json_spec: A valid dict with ServiceSpec