git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: trailing whitespace (W291)
author: Michael Fritch <mfritch@suse.com>
Fri, 19 Feb 2021 15:09:31 +0000 (08:09 -0700)
committer: Michael Fritch <mfritch@suse.com>
Wed, 3 Mar 2021 16:33:31 +0000 (09:33 -0700)
Signed-off-by: Michael Fritch <mfritch@suse.com>
src/cephadm/cephadm
src/cephadm/tox.ini

index ac3bd7ae6abb1e6f7c665210afd3a87595548622..f84eee9ab5af1e1ca25dc3d344952d1637e71323 100755 (executable)
@@ -144,7 +144,7 @@ class CephadmContext:
         if "_conf" in self.__dict__ and hasattr(self._conf, name):
             return getattr(self._conf, name)
         elif "_args" in self.__dict__ and hasattr(self._args, name):
-            return getattr(self._args, name)        
+            return getattr(self._args, name)
         else:
             return super().__getattribute__(name)
 
@@ -1653,7 +1653,7 @@ def infer_config(func):
                         name = daemon['name']
                         break
             if name:
-                config = '/var/lib/ceph/{}/{}/config'.format(ctx.fsid, 
+                config = '/var/lib/ceph/{}/{}/config'.format(ctx.fsid,
                                                              name)
         if config:
             logger.info('Inferring config %s' % config)
@@ -2551,7 +2551,7 @@ def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid,
                 deploy_daemon_units(ctx, fsid, uid, gid, daemon_type, daemon_id,
                                     c, osd_fsid=osd_fsid)
             else:
-                raise RuntimeError("attempting to deploy a daemon without a container image") 
+                raise RuntimeError("attempting to deploy a daemon without a container image")
 
     if not os.path.exists(data_dir + '/unit.created'):
         with open(data_dir + '/unit.created', 'w') as f:
@@ -3253,7 +3253,7 @@ def check_subnet(subnets: str) -> Tuple[int, List[int], str]:
     """
 
     rc = 0
-    versions = set() 
+    versions = set()
     errors = []
     subnet_list = subnets.split(',')
     for subnet in subnet_list:
@@ -3374,7 +3374,7 @@ def prepare_mon_addresses(
 def prepare_cluster_network(ctx: CephadmContext) -> Tuple[str, bool]:
     cluster_network = ""
     ipv6_cluster_network = False
-    # the cluster network may not exist on this node, so all we can do is 
+    # the cluster network may not exist on this node, so all we can do is
     # validate that the address given is valid ipv4 or ipv6 subnet
     if ctx.cluster_network:
         rc, versions, err_msg = check_subnet(ctx.cluster_network)
@@ -5811,7 +5811,7 @@ class Zypper(Packager):
         self.install(['podman'])
 
 
-def create_packager(ctx: CephadmContext, 
+def create_packager(ctx: CephadmContext,
                     stable=None, version=None, branch=None, commit=None):
     distro, distro_version, distro_codename = get_distro()
     if distro in YumDnf.DISTRO_NAMES:
@@ -6454,7 +6454,7 @@ class CephadmCache:
         self.host = {}
         self.lock = RLock()
     
-    @property 
+    @property
     def health(self):
         return {
             "started_epoch_secs": self.started_epoch_secs,
@@ -6569,7 +6569,7 @@ td,th {{
         """Handle *all* GET requests"""
 
         if self.path == '/':
-            # provide a html response if someone hits the root url, to document the 
+            # provide a html response if someone hits the root url, to document the
             # available api endpoints
             return self._fetch_root()
         elif self.path in CephadmDaemonHandler.valid_routes:
@@ -6599,7 +6599,7 @@ td,th {{
                 if tasks['daemons'] == 'inactive':
                     status_code = 204
             elif u == 'disks':
-                data = json.dumps(self.server.cephadm_cache.disks)    
+                data = json.dumps(self.server.cephadm_cache.disks)
                 if tasks['disks'] == 'inactive':
                     status_code = 204
             elif u == 'host':
@@ -6647,7 +6647,7 @@ class CephadmDaemon():
     def __init__(self, ctx: CephadmContext, fsid, daemon_id=None, port=None):
         self.ctx = ctx
         self.fsid = fsid
-        self.daemon_id = daemon_id 
+        self.daemon_id = daemon_id
         if not port:
             self.port = CephadmDaemon.default_port
         else:
@@ -6782,7 +6782,7 @@ class CephadmDaemon():
                             "scrape_timestamp": s_time,
                             "scrape_duration_secs": elapsed,
                             "scrape_errors": errors,
-                            "data": data,                    
+                            "data": data,
                         }
                     )
                     logger.debug(f"completed host-facts scrape - {elapsed}s")
@@ -6792,7 +6792,7 @@ class CephadmDaemon():
         logger.info("host-facts thread stopped")
 
     def _scrape_ceph_volume(self, refresh_interval=15):
-        # we're invoking the ceph_volume command, so we need to set the args that it 
+        # we're invoking the ceph_volume command, so we need to set the args that it
         # expects to use
         self.ctx.command = "inventory --format=json".split()
         self.ctx.fsid = self.fsid
@@ -6820,7 +6820,7 @@ class CephadmDaemon():
                 else:
                     elapsed = time.time() - s_time
 
-                    # if the call to ceph-volume returns junk with the 
+                    # if the call to ceph-volume returns junk with the
                     # json, it won't parse
                     stdout = stream.getvalue()
 
@@ -6841,7 +6841,7 @@ class CephadmDaemon():
                             "scrape_timestamp": s_time,
                             "scrape_duration_secs": elapsed,
                             "scrape_errors": errors,
-                            "data": data, 
+                            "data": data,
                         }
                     )
                     
@@ -6911,7 +6911,7 @@ class CephadmDaemon():
     def reload(self, *args):
         """reload -HUP received
         
-        This is a placeholder function only, and serves to provide the hook that could 
+        This is a placeholder function only, and serves to provide the hook that could
         be exploited later if the exporter evolves to incorporate a config file
         """
         logger.info("Reload request received - ignoring, no action needed")
@@ -6978,7 +6978,7 @@ class CephadmDaemon():
     @property
     def unit_run(self):
         
-        return """set -e 
+        return """set -e
 {py3} {bin_path} exporter --fsid {fsid} --id {daemon_id} --port {port} &""".format(
             py3=shutil.which('python3'),
             bin_path=self.binary_path,
@@ -7032,7 +7032,7 @@ WantedBy=ceph-{fsid}.target
                 f.write(config[filename])
 
         # When __file__ is <stdin> we're being invoked over remoto via the orchestrator, so
-        # we pick up the file from where the orchestrator placed it - otherwise we'll 
+        # we pick up the file from where the orchestrator placed it - otherwise we'll
         # copy it to the binary location for this cluster
         if not __file__ == '<stdin>':
             shutil.copy(__file__,
@@ -7762,7 +7762,7 @@ def _get_parser():
         help='cluster FSID')
     parser_maintenance.add_argument(
         "maintenance_action",
-        type=str, 
+        type=str,
         choices=['enter', 'exit'],
         help="Maintenance action - enter maintenance, or exit maintenance")
     parser_maintenance.set_defaults(func=command_maintenance)
index db34721d1f898deb85eb8c36fa47285b27bb1768..e3365174d7678232e316e990e3005fa97f4fb8a7 100644 (file)
@@ -6,7 +6,6 @@ skipsdist=true
 max-line-length = 100
 ignore =
     E501,
-    W291,
     W293,
     W503,
     W504,