From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Wed, 26 Apr 2023 13:16:36 +0000 (-0400) Subject: orchestrator: fix spelling errors X-Git-Tag: v18.1.2~31^2 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=38d31db98988cf865d7c6c568799247774f8b50d;p=ceph-ci.git orchestrator: fix spelling errors * a new * accommodated * adopted * appended * because * bootstrap * bootstrapping * brackets * classes * cluster * compatible * completely * confusion * daemon * daemons * dashboard * enclosure * existing * explicit * following * format * host * implementation * inferred * keepalived * kubic * maintenance * necessarily * necessary * network * notifier * octopus * permanent * presenting * related * see * snapshot * stateful * the * track * version * wasn't * weird Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> (cherry picked from commit 94ade0cd16ca82e56b936cc3e1e046f8099a5619) --- diff --git a/doc/dev/cephadm/developing-cephadm.rst b/doc/dev/cephadm/developing-cephadm.rst index fe6abf4ee31..8df292330ec 100644 --- a/doc/dev/cephadm/developing-cephadm.rst +++ b/doc/dev/cephadm/developing-cephadm.rst @@ -286,7 +286,7 @@ of the cluster. create loopback devices capable of holding OSDs. .. note:: Each osd will require 5GiB of space. -After bootstraping the cluster you can go inside the seed box in which you'll be +After bootstrapping the cluster you can go inside the seed box in which you'll be able to run Cephadm commands:: ./box.py -v cluster bash diff --git a/qa/suites/orch/cephadm/upgrade/3-upgrade/staggered.yaml b/qa/suites/orch/cephadm/upgrade/3-upgrade/staggered.yaml index 46ec56f03e7..280714e4e58 100644 --- a/qa/suites/orch/cephadm/upgrade/3-upgrade/staggered.yaml +++ b/qa/suites/orch/cephadm/upgrade/3-upgrade/staggered.yaml @@ -59,14 +59,14 @@ tasks: - ceph orch ps - ceph orch ls - ceph versions - # to make sure mgr daemons upgrade is fully completed, including being deployed by a mgr on new new version + # to make sure mgr daemons upgrade is fully completed, including being deployed by a mgr on a new version # also serves as an early failure if manually upgrading the mgrs failed as --daemon-types won't be recognized - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr - while ceph orch upgrade status | jq '.in_progress' | grep true && ! 
ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done # verify only one version found for mgrs and that their version hash matches what we are upgrading to - ceph versions | jq -e '.mgr | length == 1' - ceph versions | jq -e '.mgr | keys' | grep $sha1 - # verify overall we still se two versions, basically to make sure --daemon-types wans't ignored and all daemons upgraded + # verify overall we still see two versions, basically to make sure --daemon-types wasn't ignored and all daemons upgraded - ceph versions | jq -e '.overall | length == 2' # check that exactly two daemons have been upgraded to the new image (our 2 mgr daemons) - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2' diff --git a/qa/workunits/cephadm/test_cephadm.sh b/qa/workunits/cephadm/test_cephadm.sh index 53357b849e5..cca9cbc7bbc 100755 --- a/qa/workunits/cephadm/test_cephadm.sh +++ b/qa/workunits/cephadm/test_cephadm.sh @@ -295,7 +295,7 @@ $SUDO vgremove -f $OSD_VG_NAME || true $SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME $SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev -# osd boostrap keyring +# osd bootstrap keyring $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index b3d0c1621db..4d90573ad5b 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -438,7 +438,7 @@ class SNMPGateway: @staticmethod def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]: - """Return the version of the notifer from it's http endpoint""" + """Return the version of the notifier from its http endpoint""" path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta') try: with open(path, 'r') as env: @@ -931,7 +931,7 @@ class CephIscsi(object): # type: () -> CephContainer # daemon_id, is used to generated the cid and pid files used by podman but as both tcmu-runner # and rbd-target-api have the same daemon_id, it conflits and prevent the second container from - # starting. .tcmu runner is appened to the daemon_id to fix that. + # starting. A '.tcmu' suffix is appended to the daemon_id to fix that. tcmu_container = get_container(self.ctx, self.fsid, self.daemon_type, str(self.daemon_id) + '.tcmu') tcmu_container.entrypoint = '/usr/bin/tcmu-runner' tcmu_container.cname = self.get_container_name(desc='tcmu') @@ -1593,7 +1593,7 @@ class FileLock(object): def release(self, force: bool = False) -> None: """ Releases the file lock. - Please note, that the lock is only completly released, if the lock + Please note that the lock is only completely released if the lock counter is 0. Also note, that the lock file itself is not automatically deleted. 
:arg bool force: @@ -2150,7 +2150,7 @@ def infer_fsid(func: FuncT) -> FuncT: def infer_config(func: FuncT) -> FuncT: """ - Infer the clusater configuration using the followign priority order: + Infer the cluster configuration using the following priority order: 1- if the user has provided custom conf file (-c option) use it 2- otherwise if daemon --name has been provided use daemon conf 3- otherwise find the mon daemon conf file and use it (if v1) @@ -2185,7 +2185,7 @@ def infer_config(func: FuncT) -> FuncT: if 'fsid' in ctx and ctx.fsid: name = ctx.name if ('name' in ctx and ctx.name) else get_mon_daemon_name(ctx.fsid) if name is not None: - # daemon name has been specified (or inffered from mon), let's use its conf + # daemon name has been specified (or inferred from mon), let's use its conf ctx.config = config_path(name.split('.', 1)[0], name.split('.', 1)[1]) else: # no daemon, in case the cluster has a config dir then use it @@ -4975,7 +4975,7 @@ def parse_mon_addrv(addrv_arg: str) -> List[EndPoint]: addrv_args = [] addr_arg = addrv_arg if addr_arg[0] != '[' or addr_arg[-1] != ']': - raise Error(f'--mon-addrv value {addr_arg} must use square backets') + raise Error(f'--mon-addrv value {addr_arg} must use square brackets') for addr in addr_arg[1: -1].split(','): hasport = r.findall(addr) @@ -5131,9 +5131,9 @@ def prepare_cluster_network(ctx: CephadmContext) -> Tuple[str, bool]: cluster_network = cp.get('global', 'cluster_network').strip('"').strip("'") if cluster_network: - cluser_nets = set([x.strip() for x in cluster_network.split(',')]) + cluster_nets = set([x.strip() for x in cluster_network.split(',')]) local_subnets = set([x[0] for x in list_networks(ctx).items()]) - for net in cluser_nets: + for net in cluster_nets: if net not in local_subnets: logger.warning(f'The cluster CIDR network {net} is not configured locally.') @@ -7921,14 +7921,14 @@ class Apt(Packager): def kubic_repo_path(self) -> str: return '/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list' - def kubric_repo_gpgkey_url(self) -> str: + def kubic_repo_gpgkey_url(self) -> str: return '%s/Release.key' % self.kubic_repo_url() - def kubric_repo_gpgkey_path(self) -> str: + def kubic_repo_gpgkey_path(self) -> str: return '/etc/apt/trusted.gpg.d/kubic.release.gpg' def add_kubic_repo(self) -> None: - url = self.kubric_repo_gpgkey_url() + url = self.kubic_repo_gpgkey_url() logger.info('Installing repo GPG key from %s...' % url) try: response = urlopen(url) @@ -7938,7 +7938,7 @@ class Apt(Packager): raise Error('failed to fetch GPG key') key = response.read().decode('utf-8') tmp_key = write_tmp(key, 0, 0) - keyring = self.kubric_repo_gpgkey_path() + keyring = self.kubic_repo_gpgkey_path() call_throws(self.ctx, ['apt-key', '--keyring', keyring, 'add', tmp_key.name]) logger.info('Installing repo file at %s...' % self.kubic_repo_path()) @@ -7947,7 +7947,7 @@ class Apt(Packager): f.write(content) def rm_kubic_repo(self) -> None: - keyring = self.kubric_repo_gpgkey_path() + keyring = self.kubic_repo_gpgkey_path() if os.path.exists(keyring): logger.info('Removing repo GPG key %s...' 
% keyring) os.unlink(keyring) @@ -8248,7 +8248,7 @@ def command_add_repo(ctx: CephadmContext) -> None: except Exception: raise Error('version must be in the form x.y.z (e.g., 15.2.0)') if ctx.release: - # Pacific =/= pacific in this case, set to undercase to avoid confision + # Pacific =/= pacific in this case, set to lowercase to avoid confusion ctx.release = ctx.release.lower() pkg = create_packager(ctx, stable=ctx.release, @@ -8531,7 +8531,7 @@ class HostFacts(): Enclosures are detected by walking the scsi generic sysfs hierarchy. Any device tree that holds an 'enclosure' subdirectory is interpreted as - an enclosure. Once identified the enclosire directory is analysis to + an enclosure. Once identified, the enclosure directory is analyzed to identify key descriptors that will help relate disks to enclosures and disks to enclosure slots. @@ -8545,7 +8545,7 @@ class HostFacts(): if os.path.exists(enc_path): enc_dirs = glob(os.path.join(enc_path, '*')) if len(enc_dirs) != 1: - # incomplete enclosure spec - expecting ONE dir in the fomrat + # incomplete enclosure spec - expecting ONE dir in the format # host(adapter):bus:target:lun e.g. 16:0:0:0 continue enc_path = enc_dirs[0] @@ -9147,7 +9147,7 @@ class HostFacts(): def command_gather_facts(ctx: CephadmContext) -> None: - """gather_facts is intended to provide host releated metadata to the caller""" + """gather_facts is intended to provide host-related metadata to the caller""" host = HostFacts(ctx) print(host.dump()) @@ -9362,7 +9362,7 @@ def _get_parser(): parser_adopt.add_argument( '--force-start', action='store_true', - help='start newly adoped daemon, even if it was not running previously') + help='start newly adopted daemon, even if it was not running previously') parser_adopt.add_argument( '--container-init', action='store_true', @@ -9784,7 +9784,7 @@ def _get_parser(): '--extra-container-args', action='append', default=[], - help='Additional container arguments to apply to deamon' + help='Additional container arguments to apply to daemon' ) parser_deploy.add_argument( '--extra-entrypoint-args', diff --git a/src/cephadm/containers/keepalived/README.md b/src/cephadm/containers/keepalived/README.md index 50f8fa7e99a..bd7b605ac9a 100644 --- a/src/cephadm/containers/keepalived/README.md +++ b/src/cephadm/containers/keepalived/README.md @@ -210,7 +210,7 @@ docker run -d --net=host --cap-add NET_ADMIN \ -e KEEPALIVED_UNICAST_SRC_IP=10.10.0.21 \ -e KEEPALIVED_UNICAST_PEER_0=10.10.0.22 \ -e KEEPALIVED_TRACK_INTERFACE_1=eth0 \ --e KEEPALVED_TRACK_INTERFACE_2=eth1 \ +-e KEEPALIVED_TRACK_INTERFACE_2=eth1 \ -e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \ -e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \ quay.io/ceph/keepalived @@ -226,7 +226,7 @@ docker run -d --net=host --cap-add NET_ADMIN \ -e KEEPALIVED_UNICAST_SRC_IP=10.10.0.22 \ -e KEEPALIVED_UNICAST_PEER_0=10.10.0.21 \ -e KEEPALIVED_TRACK_INTERFACE_1=eth0 \ --e KEEPALVED_TRACK_INTERFACE_2=eth1 \ +-e KEEPALIVED_TRACK_INTERFACE_2=eth1 \ -e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \ -e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \ quay.io/ceph/keepalived diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index 6bdcd4d3b2c..f07858e23ad 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -1387,7 +1387,7 @@ class TestBootstrap(object): ( '192.168.1.1', {'192.168.1.0/24': {'eth0': ['192.168.1.1']}}, - r'must use square backets', + r'must use square brackets', 
), ( '[192.168.1.1]', diff --git a/src/pybind/mgr/cephadm/Vagrantfile b/src/pybind/mgr/cephadm/Vagrantfile index a19a7aaa489..638258c3a53 100644 --- a/src/pybind/mgr/cephadm/Vagrantfile +++ b/src/pybind/mgr/cephadm/Vagrantfile @@ -1,7 +1,7 @@ # vi: set ft=ruby : # # In order to reduce the need of recreating all vagrant boxes everytime they -# get dirty, snaptshot them and revert the snapshot of them instead. +# get dirty, snapshot them and revert to the snapshot instead. # Two helpful scripts to do this easily can be found here: # https://github.com/Devp00l/vagrant-helper-scripts diff --git a/src/pybind/mgr/cephadm/configchecks.py b/src/pybind/mgr/cephadm/configchecks.py index dc7a098277f..b9dcb18f478 100644 --- a/src/pybind/mgr/cephadm/configchecks.py +++ b/src/pybind/mgr/cephadm/configchecks.py @@ -167,7 +167,7 @@ class CephadmConfigChecks: "os_subscription", self._check_subscription), CephadmCheckDefinition(mgr, "CEPHADM_CHECK_PUBLIC_MEMBERSHIP", - "check that all hosts have a NIC on the Ceph public_netork", + "check that all hosts have a NIC on the Ceph public_network", "public_network", self._check_public_network), CephadmCheckDefinition(mgr, "CEPHADM_CHECK_MTU", @@ -696,7 +696,7 @@ class CephadmConfigChecks: self.active_checks = [] self.skipped_checks = [] - # process all healthchecks that are not explcitly disabled + # process all healthchecks that are not explicitly disabled for health_check in self.health_checks: if check_config.get(health_check.name, '') != 'disabled': self.active_checks.append(health_check.name) diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py index 42bb65a7279..305833b23ab 100644 --- a/src/pybind/mgr/cephadm/module.py +++ b/src/pybind/mgr/cephadm/module.py @@ -311,7 +311,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, type='int', default=None, desc='internal - do not modify', - # used to track track spec and other data migrations. + # used to track spec and other data migrations. ), Option( 'config_dashboard', @@ -615,7 +615,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self.migration = Migrations(self) - _service_clses: Sequence[Type[CephadmService]] = [ + _service_classes: Sequence[Type[CephadmService]] = [ OSDService, NFSService, MonService, MgrService, MdsService, RgwService, RbdMirrorService, GrafanaService, AlertmanagerService, PrometheusService, NodeExporterService, LokiService, PromtailService, CrashService, IscsiService, @@ -626,7 +626,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, # https://github.com/python/mypy/issues/8993 self.cephadm_services: Dict[str, CephadmService] = { - cls.TYPE: cls(self) for cls in _service_clses} # type: ignore + cls.TYPE: cls(self) for cls in _service_classes} # type: ignore self.mgr_service: MgrService = cast(MgrService, self.cephadm_services['mgr']) self.osd_service: OSDService = cast(OSDService, self.cephadm_services['osd']) @@ -1825,7 +1825,7 @@ Then run the following: def exit_host_maintenance(self, hostname: str) -> str: """Exit maintenance mode and return a host to an operational state - Returning from maintnenance will enable the clusters systemd target and + Returning from maintenance will enable the cluster's systemd target and start it, and remove any noout that has been added for the host if the host has osd daemons @@ -2454,7 +2454,7 @@ Then run the following: """ Deprecated. Please use `apply()` instead. 
- Keeping this around to be compapatible to mgr/dashboard + Keeping this around to be compatible with mgr/dashboard """ return [self._apply(spec) for spec in specs] diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py index 09c8c4e43cd..99ee35c9b7a 100644 --- a/src/pybind/mgr/cephadm/schedule.py +++ b/src/pybind/mgr/cephadm/schedule.py @@ -325,7 +325,7 @@ class HostAssignment(object): # TODO: At some point we want to deploy daemons that are on offline hosts # at what point we do this differs per daemon type. Stateless daemons we could - # do quickly to improve availability. Steful daemons we might want to wait longer + # do quickly to improve availability. Stateful daemons we might want to wait longer # to see if the host comes back online existing = existing_active + existing_standby diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py index 18602a764ba..b4436bf53d3 100644 --- a/src/pybind/mgr/cephadm/serve.py +++ b/src/pybind/mgr/cephadm/serve.py @@ -1492,7 +1492,7 @@ class CephadmServe: out, err, code = await self.mgr.ssh._execute_command( host, cmd, stdin=stdin, addr=addr) # if there is an agent on this host, make sure it is using the most recent - # vesion of cephadm binary + # version of cephadm binary if host in self.mgr.inventory: for agent in self.mgr.cache.get_daemons_by_type('agent', host): self.mgr._schedule_daemon_action(agent.name(), 'redeploy') diff --git a/src/pybind/mgr/cephadm/services/cephadmservice.py b/src/pybind/mgr/cephadm/services/cephadmservice.py index d5437c10933..92bedf9168b 100644 --- a/src/pybind/mgr/cephadm/services/cephadmservice.py +++ b/src/pybind/mgr/cephadm/services/cephadmservice.py @@ -249,7 +249,7 @@ class CephadmService(metaclass=ABCMeta): raise NotImplementedError() def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: - # if this is called for a service type where it hasn't explcitly been + # if this is called for a service type where it hasn't explicitly been # defined, return empty Daemon Desc return DaemonDescription() diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py index 31771fb5fce..0c097e8f72c 100644 --- a/src/pybind/mgr/cephadm/services/osd.py +++ b/src/pybind/mgr/cephadm/services/osd.py @@ -886,7 +886,7 @@ class OSDRemovalQueue(object): def _ready_to_drain_osds(self) -> List["OSD"]: """ Returns OSDs that are ok to stop and not yet draining. Only returns as many OSDs as can - be accomodated by the 'max_osd_draining_count' config value, considering the number of OSDs + be accommodated by the 'max_osd_draining_count' config value, considering the number of OSDs that are already draining. """ draining_limit = max(1, self.mgr.max_osd_draining_count) diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py index db5c2aa70c6..61baf493147 100644 --- a/src/pybind/mgr/cephadm/tests/test_cephadm.py +++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py @@ -622,21 +622,21 @@ class TestCephadm(object): CephadmServe(cephadm_module)._apply_all_services() assert len(cephadm_module.cache.get_daemons_by_type('iscsi')) == 2 - # get a deamons from postaction list (ARRGH sets!!) 
+ # get daemons from postaction list (ARRGH sets!!)
tempset = cephadm_module.requires_post_actions.copy() - tempdeamon1 = tempset.pop() - tempdeamon2 = tempset.pop() + tempdaemon1 = tempset.pop() + tempdaemon2 = tempset.pop() # make sure post actions has 2 daemons in it assert len(cephadm_module.requires_post_actions) == 2 # replicate a host cache that is not in sync when check_daemons is called - tempdd1 = cephadm_module.cache.get_daemon(tempdeamon1) - tempdd2 = cephadm_module.cache.get_daemon(tempdeamon2) + tempdd1 = cephadm_module.cache.get_daemon(tempdaemon1) + tempdd2 = cephadm_module.cache.get_daemon(tempdaemon2) host = 'test1' - if 'test1' not in tempdeamon1: + if 'test1' not in tempdaemon1: host = 'test2' - cephadm_module.cache.rm_daemon(host, tempdeamon1) + cephadm_module.cache.rm_daemon(host, tempdaemon1) # Make sure, _check_daemons does a redeploy due to monmap change: cephadm_module.mock_store_set('_ceph_get', 'mon_map', { @@ -652,11 +652,11 @@ class TestCephadm(object): CephadmServe(cephadm_module)._check_daemons() _cfg_db.assert_called_once_with([tempdd2]) - # post actions still has the other deamon in it and will run next _check_deamons + # post actions still has the other daemon in it and will run next _check_daemons assert len(cephadm_module.requires_post_actions) == 1 # post actions was missed for a daemon - assert tempdeamon1 in cephadm_module.requires_post_actions + assert tempdaemon1 in cephadm_module.requires_post_actions # put the daemon back in the cache cephadm_module.cache.add_daemon(host, tempdd1) @@ -942,7 +942,7 @@ class TestCephadm(object): c = cephadm_module.create_osds(dg) out = wait(cephadm_module, c) assert out == "Created no osd(s) on host test; already created?" - bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_hsot'), + bad_dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='invalid_host'), data_devices=DeviceSelection(paths=[''])) c = cephadm_module.create_osds(bad_dg) out = wait(cephadm_module, c) diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py index fcdee838bb4..f8395fb8272 100644 --- a/src/pybind/mgr/cephadm/tests/test_scheduling.py +++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py @@ -288,7 +288,7 @@ def test_explicit_scheduler(host_key, hosts, # * where e=[], *=any # # + list of known hosts available for scheduling (host_key) -# | + hosts used for explict placement (explicit_key) +# | + hosts used for explicit placement (explicit_key) # | | + count # | | | + existing daemons # | | | | + section (host, label, pattern) @@ -614,7 +614,7 @@ class NodeAssignmentTest(NamedTuple): 'rgw:host1(*:81)', 'rgw:host2(*:81)', 'rgw:host3(*:81)'], [] ), - # label + count_per_host + ports (+ xisting) + # label + count_per_host + ports (+ existing) NodeAssignmentTest( 'rgw', PlacementSpec(count=6, label='foo'), diff --git a/src/pybind/mgr/cephadm/utils.py b/src/pybind/mgr/cephadm/utils.py index ebceba7ceae..08ff4e9c822 100644 --- a/src/pybind/mgr/cephadm/utils.py +++ b/src/pybind/mgr/cephadm/utils.py @@ -61,7 +61,7 @@ def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]: def forall_hosts_wrapper(*args: Any) -> List[T]: from cephadm.module import CephadmOrchestrator - # Some weired logic to make calling functions with multiple arguments work. + # Some weird logic to make calling functions with multiple arguments work. 
+ # Some weird logic to make calling functions with multiple arguments work.
if len(args) == 1: vals = args[0] self = None diff --git a/src/pybind/mgr/cephadm/vagrant.config.example.json b/src/pybind/mgr/cephadm/vagrant.config.example.json index 5b18909245a..9419af6309a 100644 --- a/src/pybind/mgr/cephadm/vagrant.config.example.json +++ b/src/pybind/mgr/cephadm/vagrant.config.example.json @@ -1,6 +1,6 @@ /** - * To use a permenant config copy this file to "vagrant.config.json", - * edit it and remove this comment beacuase comments are not allowed + * To use a permanent config, copy this file to "vagrant.config.json", + * edit it and remove this comment because comments are not allowed * in a valid JSON file. */ diff --git a/src/pybind/mgr/orchestrator/_interface.py b/src/pybind/mgr/orchestrator/_interface.py index 40235553227..17e80b5799c 100644 --- a/src/pybind/mgr/orchestrator/_interface.py +++ b/src/pybind/mgr/orchestrator/_interface.py @@ -1343,7 +1343,7 @@ class InventoryFilter(object): Typical use: - filter by host when presentig UI workflow for configuring + filter by host when presenting the UI workflow for configuring a particular server. filter by label when not all of estate is Ceph servers, and we want to only learn about the Ceph servers. @@ -1543,7 +1543,7 @@ class OrchestratorClientMixin(Orchestrator): >>> import mgr_module >>> #doctest: +SKIP - ... class MyImplentation(mgr_module.MgrModule, Orchestrator): + ... class MyImplementation(mgr_module.MgrModule, Orchestrator): ... def __init__(self, ...): ... self.orch_client = OrchestratorClientMixin() ... self.orch_client.set_mgr(self.mgr)) @@ -1551,7 +1551,7 @@ def set_mgr(self, mgr: MgrModule) -> None: """ - Useable in the Dashbord that uses a global ``mgr`` + Usable in the Dashboard that uses a global ``mgr`` """ self.__mgr = mgr # Make sure we're not overwriting any other `mgr` properties diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py index 3141fd760e5..7fb2b11422b 100644 --- a/src/python-common/ceph/deployment/drive_group.py +++ b/src/python-common/ceph/deployment/drive_group.py @@ -63,7 +63,7 @@ class DeviceSelection(object): self.vendor = vendor #: Size specification of format LOW:HIGH. - #: Can also take the the form :HIGH, LOW: + #: Can also take the form :HIGH, LOW: #: or an exact value (as ceph-volume inventory reports) self.size: Optional[str] = size diff --git a/src/python-common/ceph/deployment/drive_selection/matchers.py b/src/python-common/ceph/deployment/drive_selection/matchers.py index f423c2f43ee..0da23997523 100644 --- a/src/python-common/ceph/deployment/drive_selection/matchers.py +++ b/src/python-common/ceph/deployment/drive_selection/matchers.py @@ -187,7 +187,7 @@ class SizeMatcher(Matcher): # type: (str, str) -> None # The 'key' value is overwritten here because - # the user_defined attribute does not neccessarily + # the user_defined attribute does not necessarily # correspond to the desired attribute # requested from the inventory output Matcher.__init__(self, key, value) @@ -372,7 +372,7 @@ class SizeMatcher(Matcher): if not disk: return False disk_value = self._get_disk_key(disk) - # This doesn't neccessarily have to be a float. + # This doesn't necessarily have to be a float. # The current output from ceph-volume gives a float.. # This may change in the future.. 
# todo: harden this paragraph @@ -388,7 +388,7 @@ class SizeMatcher(Matcher): if disk_size_in_byte <= self.to_byte( self.high) and disk_size_in_byte >= self.to_byte(self.low): return True - # is a else: return False neccessary here? + # is an else: return False necessary here? # (and in all other branches) logger.debug("Disk didn't match for 'high/low' filter") diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py index 393100b45a7..37a17f1805c 100644 --- a/src/python-common/ceph/deployment/service_spec.py +++ b/src/python-common/ceph/deployment/service_spec.py @@ -639,7 +639,7 @@ class ServiceSpec(object): understanding of what fields are special for a give service type. Note, we'll need to stay compatible with both versions for the - the next two major releases (octoups, pacific). + next two major releases (octopus, pacific). :param json_spec: A valid dict with ServiceSpec