From: Michael Fritch
Date: Tue, 2 Feb 2021 17:42:36 +0000 (-0700)
Subject: mgr/cephadm: Local variable name is assigned to but never used (F841)
X-Git-Tag: v16.2.0~178^2~30
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=dcf65354ad62232cc6ae1deb8ce57abd4160b825;p=ceph.git

mgr/cephadm: Local variable name is assigned to but never used (F841)

Signed-off-by: Michael Fritch
(cherry picked from commit 97c6a8564ecbc5f39a1907d132b6646f55588326)
---
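Notes: flake8's F841 warning fires when a local variable is assigned but
never read. The hunks below apply the usual remedies: dropping the unused
binding from an "except ... as e" clause, unpacking an unwanted tuple element
into the throwaway name "_", deleting the dead assignment outright, or
appending "# noqa: F841" where the assignment is kept deliberately. The final
tox.ini hunk removes F841 from the ignore list so the check is enforced from
now on. A minimal sketch of these patterns, with hypothetical names chosen
only for illustration:

    import subprocess

    def demo() -> None:
        # Drop the unused exception name (was: "except OSError as e:").
        try:
            subprocess.run(['true'])
        except OSError:
            raise

        # Unpack the unwanted element into "_" (was: "name, host = ...").
        name, _ = ('mon.a', 'host1')

        # Keep a deliberate assignment and silence the check explicitly.
        result = subprocess.run(['true'])  # noqa: F841
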
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 418ad9f7818b1..62e940d0cf3bb 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -913,7 +913,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
                                                              error_ok=True, no_fsid=True)
             if code:
                 return 1, '', ('check-host failed:\n' + '\n'.join(err))
-        except OrchestratorError as e:
+        except OrchestratorError:
             self.log.exception(f"check-host failed for '{host}'")
             return 1, '', ('check-host failed:\n' +
                            f"Host '{host}' not found. Use 'ceph orch host ls' to see all managed hosts.")
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index a592397c48ed8..6488f4495b34d 100644
--- a/src/pybind/mgr/cephadm/serve.py
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -99,7 +99,7 @@ class CephadmServe:
     def _serve_sleep(self) -> None:
         sleep_interval = 600
         self.log.debug('Sleeping for %d seconds', sleep_interval)
-        ret = self.mgr.event.wait(sleep_interval)
+        self.mgr.event.wait(sleep_interval)
         self.mgr.event.clear()
 
     def _update_paused_health(self) -> None:
diff --git a/src/pybind/mgr/cephadm/services/cephadmservice.py b/src/pybind/mgr/cephadm/services/cephadmservice.py
index a95ee66ee9aac..297d5594a2c9b 100644
--- a/src/pybind/mgr/cephadm/services/cephadmservice.py
+++ b/src/pybind/mgr/cephadm/services/cephadmservice.py
@@ -254,7 +254,6 @@ class CephadmService(metaclass=ABCMeta):
         def plural(count: int) -> str:
             return 'daemon' if count == 1 else 'daemons'
 
-        daemon_count = "only" if number_of_running_daemons == 1 else number_of_running_daemons
         left_count = "no" if num_daemons_left == 0 else num_daemons_left
 
         if alert:
@@ -378,7 +377,7 @@
         Create a new monitor on the given host.
         """
         assert self.TYPE == daemon_spec.daemon_type
-        name, host, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network
+        name, _, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network
 
         # get mon. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -427,7 +426,7 @@
         })
         try:
             j = json.loads(out)
-        except Exception as e:
+        except Exception:
             raise OrchestratorError('failed to parse quorum status')
 
         mons = [m['name'] for m in j['monmap']['mons']]
@@ -467,7 +466,7 @@
         Create a new manager instance on a host.
         """
         assert self.TYPE == daemon_spec.daemon_type
-        mgr_id, host = daemon_spec.daemon_id, daemon_spec.host
+        mgr_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         # get mgr. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -485,7 +484,6 @@
         # If this is the case then the dashboard port opened will be only the used
         # as default.
         ports = []
-        config_ports = ''
         ret, mgr_services, err = self.mgr.check_mon_command({
             'prefix': 'mgr services',
         })
@@ -569,7 +567,7 @@ class MdsService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        mds_id, host = daemon_spec.daemon_id, daemon_spec.host
+        mds_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         # get mgr. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -664,7 +662,7 @@ class RgwService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        rgw_id, host = daemon_spec.daemon_id, daemon_spec.host
+        rgw_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         keyring = self.get_keyring(rgw_id)
 
@@ -704,7 +702,7 @@
             try:
                 j = json.loads(out)
                 return j.get('realms', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse realm info')
 
         def create_realm() -> None:
@@ -714,7 +712,7 @@
                    'realm', 'create',
                    '--rgw-realm=%s' % spec.rgw_realm,
                    '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created realm: %s' % spec.rgw_realm)
 
         def get_zonegroups() -> List[str]:
@@ -730,7 +728,7 @@
             try:
                 j = json.loads(out)
                 return j.get('zonegroups', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse zonegroup info')
 
         def create_zonegroup() -> None:
@@ -740,7 +738,7 @@
                    'zonegroup', 'create',
                    '--rgw-zonegroup=default',
                    '--master', '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created zonegroup: default')
 
         def create_zonegroup_if_required() -> None:
@@ -761,7 +759,7 @@
             try:
                 j = json.loads(out)
                 return j.get('zones', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse zone info')
 
         def create_zone() -> None:
@@ -772,7 +770,7 @@
                    '--rgw-zonegroup=default',
                    '--rgw-zone=%s' % spec.rgw_zone,
                    '--master', '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created zone: %s' % spec.rgw_zone)
 
         changes = False
@@ -795,7 +793,7 @@
                    'period', 'update',
                    '--rgw-realm=%s' % spec.rgw_realm,
                    '--commit']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('updated period')
 
 
@@ -804,7 +802,7 @@ class RbdMirrorService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host
+        daemon_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         ret, keyring, err = self.mgr.check_mon_command({
             'prefix': 'auth get-or-create',
diff --git a/src/pybind/mgr/cephadm/services/ha_rgw.py b/src/pybind/mgr/cephadm/services/ha_rgw.py
index f8670fdae7978..05dea1eaac84c 100644
--- a/src/pybind/mgr/cephadm/services/ha_rgw.py
+++ b/src/pybind/mgr/cephadm/services/ha_rgw.py
@@ -62,7 +62,6 @@ class HA_RGWService(CephService):
 
     def haproxy_generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
         daemon_id = daemon_spec.daemon_id
-        host = daemon_spec.host
         service_name: str = "ha-rgw." + daemon_id.split('.')[0]
 
         # if no service spec, return empty config
diff --git a/src/pybind/mgr/cephadm/services/nfs.py b/src/pybind/mgr/cephadm/services/nfs.py
index 7ca3723ef820e..fdd080e3a3d7d 100644
--- a/src/pybind/mgr/cephadm/services/nfs.py
+++ b/src/pybind/mgr/cephadm/services/nfs.py
@@ -110,7 +110,7 @@ class NFSService(CephService):
         exists = True
         try:
             ioctx.stat(obj)
-        except rados.ObjectNotFound as e:
+        except rados.ObjectNotFound:
             exists = False
 
         if exists and not clobber:
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 4979045ae7679..fcaebe533ba95 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -386,7 +386,7 @@ class TestCephadm(object):
     def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
         _mon_cmd.return_value = (1, "", "fail_msg")
         with pytest.raises(OrchestratorError):
-            out = cephadm_module.osd_service.find_destroyed_osds()
+            cephadm_module.osd_service.find_destroyed_osds()
 
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
     def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
diff --git a/src/pybind/mgr/cephadm/tests/test_osd_removal.py b/src/pybind/mgr/cephadm/tests/test_osd_removal.py
index 0fb81796ab380..f93b2c2c41ac4 100644
--- a/src/pybind/mgr/cephadm/tests/test_osd_removal.py
+++ b/src/pybind/mgr/cephadm/tests/test_osd_removal.py
@@ -130,7 +130,7 @@ class TestOSD:
 
     @mock.patch('cephadm.services.osd.OSD.stop_draining')
     def test_stop(self, stop_draining_mock, osd_obj):
-        ret = osd_obj.stop()
+        osd_obj.stop()
         assert osd_obj.started is False
         assert osd_obj.stopped is True
         stop_draining_mock.assert_called_once()
@@ -155,7 +155,7 @@ class TestOSD:
 
     @mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop")
     def test_is_ok_to_stop(self, _, osd_obj):
-        ret = osd_obj.is_ok_to_stop
+        osd_obj.is_ok_to_stop
         osd_obj.rm_util.ok_to_stop.assert_called_once()
 
     @pytest.mark.parametrize(
@@ -173,27 +173,27 @@ class TestOSD:
 
     @mock.patch("cephadm.services.osd.RemoveUtil.safe_to_destroy")
     def test_safe_to_destroy(self, _, osd_obj):
-        ret = osd_obj.safe_to_destroy()
+        osd_obj.safe_to_destroy()
         osd_obj.rm_util.safe_to_destroy.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.set_osd_flag")
     def test_down(self, _, osd_obj):
-        ret = osd_obj.down()
+        osd_obj.down()
         osd_obj.rm_util.set_osd_flag.assert_called_with([osd_obj], 'down')
 
     @mock.patch("cephadm.services.osd.RemoveUtil.destroy_osd")
     def test_destroy_osd(self, _, osd_obj):
-        ret = osd_obj.destroy()
+        osd_obj.destroy()
         osd_obj.rm_util.destroy_osd.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.purge_osd")
     def test_purge(self, _, osd_obj):
-        ret = osd_obj.purge()
+        osd_obj.purge()
         osd_obj.rm_util.purge_osd.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count")
     def test_pg_count(self, _, osd_obj):
-        ret = osd_obj.get_pg_count()
+        osd_obj.get_pg_count()
         osd_obj.rm_util.get_pg_count.assert_called_once()
 
     def test_drain_status_human_not_started(self, osd_obj):
diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py
index 071c5c26e7217..47d7ba54c4079 100644
--- a/src/pybind/mgr/cephadm/tests/test_scheduling.py
+++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py
@@ -565,9 +565,9 @@ def test_node_assignment3(service_type, placement, hosts,
 ])
 def test_bad_placements(placement):
     try:
-        s = PlacementSpec.from_string(placement.split(' '))
+        PlacementSpec.from_string(placement.split(' '))
         assert False
-    except ServiceSpecValidationError as e:
+    except ServiceSpecValidationError:
         pass
 
 
diff --git a/src/pybind/mgr/cephadm/tests/test_services.py b/src/pybind/mgr/cephadm/tests/test_services.py
index 11f75ed9a24bb..4891b81ac00b2 100644
--- a/src/pybind/mgr/cephadm/tests/test_services.py
+++ b/src/pybind/mgr/cephadm/tests/test_services.py
@@ -117,8 +117,8 @@ class TestCephadmService:
         assert "client.crash.host" == \
             cephadm_services["crash"].get_auth_entity("id1", "host")
         with pytest.raises(OrchestratorError):
-            t = cephadm_services["crash"].get_auth_entity("id1", "")
-            t = cephadm_services["crash"].get_auth_entity("id1")
+            cephadm_services["crash"].get_auth_entity("id1", "")
+            cephadm_services["crash"].get_auth_entity("id1")
 
         assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
         assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
diff --git a/src/pybind/mgr/cephadm/utils.py b/src/pybind/mgr/cephadm/utils.py
index 12574437b13c7..d5e9ab00353d9 100644
--- a/src/pybind/mgr/cephadm/utils.py
+++ b/src/pybind/mgr/cephadm/utils.py
@@ -68,7 +68,7 @@ def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
             if self:
                 return f(self, *arg)
             return f(*arg)
-        except Exception as e:
+        except Exception:
             logger.exception(f'executing {f.__name__}({args}) failed.')
             raise
 
diff --git a/src/pybind/mgr/tox.ini b/src/pybind/mgr/tox.ini
index 0cfc1d0cc0380..a13e3d14589ff 100644
--- a/src/pybind/mgr/tox.ini
+++ b/src/pybind/mgr/tox.ini
@@ -14,7 +14,6 @@ ignore =
     E501,
     F401,
     F811,
-    F841,
     W503,
 exclude =
     .tox,