git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/cephadm: Local variable name is assigned to but never used (F841)
author: Michael Fritch <mfritch@suse.com>
Tue, 2 Feb 2021 17:42:36 +0000 (10:42 -0700)
committer: Michael Fritch <mfritch@suse.com>
Fri, 5 Feb 2021 18:20:57 +0000 (11:20 -0700)
Signed-off-by: Michael Fritch <mfritch@suse.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/serve.py
src/pybind/mgr/cephadm/services/cephadmservice.py
src/pybind/mgr/cephadm/services/ha_rgw.py
src/pybind/mgr/cephadm/services/nfs.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/cephadm/tests/test_osd_removal.py
src/pybind/mgr/cephadm/tests/test_scheduling.py
src/pybind/mgr/cephadm/tests/test_services.py
src/pybind/mgr/cephadm/utils.py
src/pybind/mgr/tox.ini

index e58d10815c3f44b9e2b90258aac3a7a8b7a02c89..85bb7b45b350e46a4fd258a403715ac8f2e306db 100644 (file)
@@ -905,7 +905,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
                                                              error_ok=True, no_fsid=True)
             if code:
                 return 1, '', ('check-host failed:\n' + '\n'.join(err))
-        except OrchestratorError as e:
+        except OrchestratorError:
             self.log.exception(f"check-host failed for '{host}'")
             return 1, '', ('check-host failed:\n'
                            + f"Host '{host}' not found. Use 'ceph orch host ls' to see all managed hosts.")
index 66f33529be6a8084f29a526b5705a9b1aff32834..17e279e52005eb639b5c14e1f9b936bbced6badd 100644 (file)
@@ -99,7 +99,7 @@ class CephadmServe:
     def _serve_sleep(self) -> None:
         sleep_interval = 600
         self.log.debug('Sleeping for %d seconds', sleep_interval)
-        ret = self.mgr.event.wait(sleep_interval)
+        self.mgr.event.wait(sleep_interval)
         self.mgr.event.clear()
 
     def _update_paused_health(self) -> None:
index c344516170b8ac0cb08cec591541d734f15a53f1..e4060152a8b99b0ded1344cf29723a61c9c62c55 100644 (file)
@@ -244,7 +244,6 @@ class CephadmService(metaclass=ABCMeta):
         def plural(count: int) -> str:
             return 'daemon' if count == 1 else 'daemons'
 
-        daemon_count = "only" if number_of_running_daemons == 1 else number_of_running_daemons
         left_count = "no" if num_daemons_left == 0 else num_daemons_left
 
         out = (f'WARNING: Stopping {len(daemon_ids)} out of {number_of_running_daemons} daemons in {service} service. '
@@ -363,7 +362,7 @@ class MonService(CephService):
         Create a new monitor on the given host.
         """
         assert self.TYPE == daemon_spec.daemon_type
-        name, host, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network
+        name, _, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network
 
         # get mon. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -412,7 +411,7 @@ class MonService(CephService):
         })
         try:
             j = json.loads(out)
-        except Exception as e:
+        except Exception:
             raise OrchestratorError('failed to parse quorum status')
 
         mons = [m['name'] for m in j['monmap']['mons']]
@@ -452,7 +451,7 @@ class MgrService(CephService):
         Create a new manager instance on a host.
         """
         assert self.TYPE == daemon_spec.daemon_type
-        mgr_id, host = daemon_spec.daemon_id, daemon_spec.host
+        mgr_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         # get mgr. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -470,7 +469,6 @@ class MgrService(CephService):
         # If this is the case then the dashboard port opened will be only the used
         # as default.
         ports = []
-        config_ports = ''
         ret, mgr_services, err = self.mgr.check_mon_command({
             'prefix': 'mgr services',
         })
@@ -539,7 +537,7 @@ class MdsService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        mds_id, host = daemon_spec.daemon_id, daemon_spec.host
+        mds_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         # get mgr. key
         ret, keyring, err = self.mgr.check_mon_command({
@@ -634,7 +632,7 @@ class RgwService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        rgw_id, host = daemon_spec.daemon_id, daemon_spec.host
+        rgw_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         keyring = self.get_keyring(rgw_id)
 
@@ -674,7 +672,7 @@ class RgwService(CephService):
             try:
                 j = json.loads(out)
                 return j.get('realms', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse realm info')
 
         def create_realm() -> None:
@@ -684,7 +682,7 @@ class RgwService(CephService):
                    'realm', 'create',
                    '--rgw-realm=%s' % spec.rgw_realm,
                    '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created realm: %s' % spec.rgw_realm)
 
         def get_zonegroups() -> List[str]:
@@ -700,7 +698,7 @@ class RgwService(CephService):
             try:
                 j = json.loads(out)
                 return j.get('zonegroups', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse zonegroup info')
 
         def create_zonegroup() -> None:
@@ -710,7 +708,7 @@ class RgwService(CephService):
                    'zonegroup', 'create',
                    '--rgw-zonegroup=default',
                    '--master', '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created zonegroup: default')
 
         def create_zonegroup_if_required() -> None:
@@ -731,7 +729,7 @@ class RgwService(CephService):
             try:
                 j = json.loads(out)
                 return j.get('zones', [])
-            except Exception as e:
+            except Exception:
                 raise OrchestratorError('failed to parse zone info')
 
         def create_zone() -> None:
@@ -742,7 +740,7 @@ class RgwService(CephService):
                    '--rgw-zonegroup=default',
                    '--rgw-zone=%s' % spec.rgw_zone,
                    '--master', '--default']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('created zone: %s' % spec.rgw_zone)
 
         changes = False
@@ -765,7 +763,7 @@ class RgwService(CephService):
                    'period', 'update',
                    '--rgw-realm=%s' % spec.rgw_realm,
                    '--commit']
-            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # noqa: F841
             self.mgr.log.info('updated period')
 
 
@@ -774,7 +772,7 @@ class RbdMirrorService(CephService):
 
     def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
         assert self.TYPE == daemon_spec.daemon_type
-        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host
+        daemon_id, _ = daemon_spec.daemon_id, daemon_spec.host
 
         ret, keyring, err = self.mgr.check_mon_command({
             'prefix': 'auth get-or-create',
index f8670fdae79782862bacd8830b253466843eb7f8..05dea1eaac84c95faadbd732c29a94d99ea98f24 100644 (file)
@@ -62,7 +62,6 @@ class HA_RGWService(CephService):
 
     def haproxy_generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
         daemon_id = daemon_spec.daemon_id
-        host = daemon_spec.host
 
         service_name: str = "ha-rgw." + daemon_id.split('.')[0]
         # if no service spec, return empty config
index 7ca3723ef820e0f8c43cb263bd7a9ca0378a3ff0..fdd080e3a3d7d90169d3e5e39e620eac69d6fc40 100644 (file)
@@ -110,7 +110,7 @@ class NFSService(CephService):
             exists = True
             try:
                 ioctx.stat(obj)
-            except rados.ObjectNotFound as e:
+            except rados.ObjectNotFound:
                 exists = False
 
             if exists and not clobber:
index 0603720d2a48745770f331aaeed6c5df235a15c1..fcc953d0730ecd589740838fc2086819d8c7e9ec 100644 (file)
@@ -386,7 +386,7 @@ class TestCephadm(object):
     def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
         _mon_cmd.return_value = (1, "", "fail_msg")
         with pytest.raises(OrchestratorError):
-            out = cephadm_module.osd_service.find_destroyed_osds()
+            cephadm_module.osd_service.find_destroyed_osds()
 
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
     def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
index 0fb81796ab38058b33a1ec0baa19113c7bdae99e..f93b2c2c41ac483e97c791f861792fc9f59a8d72 100644 (file)
@@ -130,7 +130,7 @@ class TestOSD:
 
     @mock.patch('cephadm.services.osd.OSD.stop_draining')
     def test_stop(self, stop_draining_mock, osd_obj):
-        ret = osd_obj.stop()
+        osd_obj.stop()
         assert osd_obj.started is False
         assert osd_obj.stopped is True
         stop_draining_mock.assert_called_once()
@@ -155,7 +155,7 @@ class TestOSD:
 
     @mock.patch("cephadm.services.osd.RemoveUtil.ok_to_stop")
     def test_is_ok_to_stop(self, _, osd_obj):
-        ret = osd_obj.is_ok_to_stop
+        osd_obj.is_ok_to_stop
         osd_obj.rm_util.ok_to_stop.assert_called_once()
 
     @pytest.mark.parametrize(
@@ -173,27 +173,27 @@ class TestOSD:
 
     @mock.patch("cephadm.services.osd.RemoveUtil.safe_to_destroy")
     def test_safe_to_destroy(self, _, osd_obj):
-        ret = osd_obj.safe_to_destroy()
+        osd_obj.safe_to_destroy()
         osd_obj.rm_util.safe_to_destroy.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.set_osd_flag")
     def test_down(self, _, osd_obj):
-        ret = osd_obj.down()
+        osd_obj.down()
         osd_obj.rm_util.set_osd_flag.assert_called_with([osd_obj], 'down')
 
     @mock.patch("cephadm.services.osd.RemoveUtil.destroy_osd")
     def test_destroy_osd(self, _, osd_obj):
-        ret = osd_obj.destroy()
+        osd_obj.destroy()
         osd_obj.rm_util.destroy_osd.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.purge_osd")
     def test_purge(self, _, osd_obj):
-        ret = osd_obj.purge()
+        osd_obj.purge()
         osd_obj.rm_util.purge_osd.assert_called_once()
 
     @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count")
     def test_pg_count(self, _, osd_obj):
-        ret = osd_obj.get_pg_count()
+        osd_obj.get_pg_count()
         osd_obj.rm_util.get_pg_count.assert_called_once()
 
     def test_drain_status_human_not_started(self, osd_obj):
index 071c5c26e72173219cfbf8905bb1a13fc00f34a0..47d7ba54c4079160115ea263d3ddf7f00b3b9f98 100644 (file)
@@ -565,9 +565,9 @@ def test_node_assignment3(service_type, placement, hosts,
     ])
 def test_bad_placements(placement):
     try:
-        s = PlacementSpec.from_string(placement.split(' '))
+        PlacementSpec.from_string(placement.split(' '))
         assert False
-    except ServiceSpecValidationError as e:
+    except ServiceSpecValidationError:
         pass
 
 
index 11f75ed9a24bb905219d92f3d4865581a39979cd..4891b81ac00b214e901e93bde309b3506f049a47 100644 (file)
@@ -117,8 +117,8 @@ class TestCephadmService:
         assert "client.crash.host" == \
             cephadm_services["crash"].get_auth_entity("id1", "host")
         with pytest.raises(OrchestratorError):
-            t = cephadm_services["crash"].get_auth_entity("id1", "")
-            t = cephadm_services["crash"].get_auth_entity("id1")
+            cephadm_services["crash"].get_auth_entity("id1", "")
+            cephadm_services["crash"].get_auth_entity("id1")
 
         assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
         assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
index 12574437b13c7d718d9d332477ce8d3b46bf1286..d5e9ab00353d900b24b0817e8dec56ccc537c89d 100644 (file)
@@ -68,7 +68,7 @@ def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
                 if self:
                     return f(self, *arg)
                 return f(*arg)
-            except Exception as e:
+            except Exception:
                 logger.exception(f'executing {f.__name__}({args}) failed.')
                 raise
 
index b365e48048456849aabcc9a5f5ddd2a3e4a8d12e..d2951143e55427731abd9847df5b5fc21a3034da 100644 (file)
@@ -14,7 +14,6 @@ ignore =
     E501,
     F401,
     F811,
-    F841,
     W503,
 exclude =
     .tox,