mgr/dashboard: upgraded python dev dependencies (26007/head)
author alfonsomthd <almartin@redhat.com>
Thu, 17 Jan 2019 15:53:48 +0000 (16:53 +0100)
committer alfonsomthd <almartin@redhat.com>
Thu, 17 Jan 2019 15:53:48 +0000 (16:53 +0100)
* Fixed linting/style issues found after upgrading:
  pylint, astroid, pycodestyle

Signed-off-by: Alfonso Martínez <almartin@redhat.com>
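
The diff below is purely mechanical: no behaviour changes, only fixes for checks introduced or tightened by the upgraded linters. A minimal sketch of each recurring pattern, using made-up function names rather than code from this commit, looks roughly like this (the check names in the comments, such as consider-using-dict-comprehension, no-else-return, chained-comparison and pycodestyle W504, are the ones these changes most plausibly address):

# Illustrative sketch only; hypothetical helpers, not code from this commit.

def pool_stats_by_id(pools):
    # pylint consider-using-dict-comprehension:
    #   dict([(p['id'], p['stats']) for p in pools])  ->  dict comprehension
    return {p['id']: p['stats'] for p in pools}


def device_classes(devices):
    # pylint consider-using-set-comprehension:
    #   set([d['class'] for d in devices])  ->  set comprehension
    return list({d['class'] for d in devices})


def counter_rate(data):
    # pylint no-else-return: drop the elif/else once a branch has returned.
    if not data:
        return [(0, 0.0)]
    if len(data) == 1:
        return [(data[0][0], 0.0)]
    return data


def needs_monkey_patch(version):
    # pylint chained-comparison:
    #   low <= version and version < high  ->  low <= version < high
    return (3, 5, 0) <= version < (3, 7, 0)


def pool_has_errors(health, callouts):
    # pycodestyle W504 (line break after binary operator): continuation
    # lines now start with the operator instead of ending with it.
    return (health != 'error'
            and any(v['level'] == 'error' for v in callouts.values()))
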
12 files changed:
src/pybind/mgr/dashboard/controllers/__init__.py
src/pybind/mgr/dashboard/controllers/cephfs.py
src/pybind/mgr/dashboard/controllers/docs.py
src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
src/pybind/mgr/dashboard/module.py
src/pybind/mgr/dashboard/services/access_control.py
src/pybind/mgr/dashboard/services/ceph_service.py
src/pybind/mgr/dashboard/services/rgw_client.py
src/pybind/mgr/dashboard/services/tcmu_service.py
src/pybind/mgr/dashboard/tests/test_tcmu_iscsi.py
src/pybind/mgr/dashboard/tools.py

diff --git a/src/pybind/mgr/dashboard/controllers/__init__.py b/src/pybind/mgr/dashboard/controllers/__init__.py
index ea9781266872d9ca9725239906a370c222f9db8d..a0fa08e2b476f72fc529681c47f133d94442eca6 100644 (file)
@@ -282,7 +282,7 @@ class Task(object):
     def __init__(self, name, metadata, wait_for=5.0, exception_handler=None):
         self.name = name
         if isinstance(metadata, list):
-            self.metadata = dict([(e[1:-1], e) for e in metadata])
+            self.metadata = {e[1:-1]: e for e in metadata}
         else:
             self.metadata = metadata
         self.wait_for = wait_for
diff --git a/src/pybind/mgr/dashboard/controllers/cephfs.py b/src/pybind/mgr/dashboard/controllers/cephfs.py
index df31d4a4f36eb2d9fb303c5167378159a24973fc..4ff75a28036d92c2c3029d4fb32960c107eca31c 100644 (file)
@@ -204,9 +204,9 @@ class CephFS(RESTController):
             )
 
         df = mgr.get("df")
-        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
+        pool_stats = {p['id']: p['stats'] for p in df['pools']}
         osdmap = mgr.get("osd_map")
-        pools = dict([(p['pool'], p) for p in osdmap['pools']])
+        pools = {p['pool']: p for p in osdmap['pools']}
         metadata_pool_id = mdsmap['metadata_pool']
         data_pool_ids = mdsmap['data_pools']
 
diff --git a/src/pybind/mgr/dashboard/controllers/docs.py b/src/pybind/mgr/dashboard/controllers/docs.py
index a96f404e0d4a4c1f7161b0d63568e721dff48726..789ebc47177c28352dfaddbb2902de810d6a7f9d 100644 (file)
@@ -34,15 +34,15 @@ class Docs(BaseController):
         def_value = param['default'] if 'default' in param else None
         if param_name.startswith("is_"):
             return "boolean"
-        elif "size" in param_name:
+        if "size" in param_name:
             return "integer"
-        elif "count" in param_name:
+        if "count" in param_name:
             return "integer"
-        elif "num" in param_name:
+        if "num" in param_name:
             return "integer"
-        elif isinstance(def_value, bool):
+        if isinstance(def_value, bool):
             return "boolean"
-        elif isinstance(def_value, int):
+        if isinstance(def_value, int):
             return "integer"
         return "string"
 
diff --git a/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py b/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
index 742604beb6307a121aa4fe61c8f782704e428488..34c9f651b3eaa00b3dcc6f1afbaff8a9e8bfad5a 100644 (file)
@@ -59,7 +59,7 @@ class ErasureCodeProfile(RESTController):
             # Because 'shec' is experimental it's not included
             'plugins': config['osd_erasure_code_plugins'].split() + ['shec'],
             'directory': config['erasure_code_dir'],
-            'devices': list(set([device['class'] for device in osd_map_crush['devices']])),
+            'devices': list({device['class'] for device in osd_map_crush['devices']}),
             'failure_domains': [domain['name'] for domain in osd_map_crush['types']],
             'names': [name for name, _ in
                       mgr.get('osd_map').get('erasure_code_profiles', {}).items()]
diff --git a/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py b/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
index b96808f35e9dae6bc5e1eb938884ae7f91194866..cb4f1cea7721f5879258eee2331d001813f6dcbe 100644 (file)
@@ -84,16 +84,16 @@ def get_daemons_and_pools():  # pylint: disable=R0915
             'health': 'Unknown'
         }
         for _, pool_data in daemon['status'].items():
-            if (health['health'] != 'error' and
-                    [k for k, v in pool_data.get('callouts', {}).items()
-                     if v['level'] == 'error']):
+            if (health['health'] != 'error'
+                    and [k for k, v in pool_data.get('callouts', {}).items()
+                         if v['level'] == 'error']):
                 health = {
                     'health_color': 'error',
                     'health': 'Error'
                 }
-            elif (health['health'] != 'error' and
-                  [k for k, v in pool_data.get('callouts', {}).items()
-                   if v['level'] == 'warning']):
+            elif (health['health'] != 'error'
+                  and [k for k, v in pool_data.get('callouts', {}).items()
+                       if v['level'] == 'warning']):
                 health = {
                     'health_color': 'warning',
                     'health': 'Warning'
@@ -157,12 +157,12 @@ def get_daemons_and_pools():  # pylint: disable=R0915
                     stats['image_local_count'] = pool_data.get('image_local_count', 0)
                     stats['image_remote_count'] = pool_data.get('image_remote_count', 0)
 
-                if (stats.get('health_color', '') != 'error' and
-                        pool_data.get('image_error_count', 0) > 0):
+                if (stats.get('health_color', '') != 'error'
+                        and pool_data.get('image_error_count', 0) > 0):
                     stats['health_color'] = 'error'
                     stats['health'] = 'Error'
-                elif (stats.get('health_color', '') != 'error' and
-                      pool_data.get('image_warning_count', 0) > 0):
+                elif (stats.get('health_color', '') != 'error'
+                      and pool_data.get('image_warning_count', 0) > 0):
                     stats['health_color'] = 'warning'
                     stats['health'] = 'Warning'
                 elif stats.get('health', None) is None:
@@ -362,8 +362,8 @@ class RbdMirroringPoolMode(RESTController):
     def set(self, pool_name, mirror_mode=None):
         def _edit(ioctx, mirror_mode=None):
             if mirror_mode:
-                mode_enum = dict([[x[1], x[0]] for x in
-                                  self.MIRROR_MODES.items()]).get(mirror_mode, None)
+                mode_enum = {x[1]: x[0] for x in
+                             self.MIRROR_MODES.items()}.get(mirror_mode, None)
                 if mode_enum is None:
                     raise rbd.Error('invalid mirror mode "{}"'.format(mirror_mode))
 
diff --git a/src/pybind/mgr/dashboard/module.py b/src/pybind/mgr/dashboard/module.py
index 61b77e276719a861774bb824e7bcb70ca5cc56d5..ac45f23bdeeb8d6dec058d77993f78f63330c41e 100644 (file)
@@ -35,7 +35,7 @@ if cherrypy is not None:
     v = StrictVersion(cherrypy.__version__)
     # It was fixed in 3.7.0.  Exact lower bound version is probably earlier,
     # but 3.5.0 is what this monkey patch is tested on.
-    if v >= StrictVersion("3.5.0") and v < StrictVersion("3.7.0"):
+    if StrictVersion("3.5.0") <= v < StrictVersion("3.7.0"):
         from cherrypy.wsgiserver.wsgiserver2 import HTTPConnection,\
                                                     CP_fileobject
 
@@ -337,13 +337,13 @@ class Module(MgrModule, CherryPyConfig):
         res = handle_sso_command(cmd)
         if res[0] != -errno.ENOSYS:
             return res
-        elif cmd['prefix'] == 'dashboard set-jwt-token-ttl':
+        if cmd['prefix'] == 'dashboard set-jwt-token-ttl':
             self.set_module_option('jwt_token_ttl', str(cmd['seconds']))
             return 0, 'JWT token TTL updated', ''
-        elif cmd['prefix'] == 'dashboard get-jwt-token-ttl':
+        if cmd['prefix'] == 'dashboard get-jwt-token-ttl':
             ttl = self.get_module_option('jwt_token_ttl', JwtManager.JWT_TOKEN_TTL)
             return 0, str(ttl), ''
-        elif cmd['prefix'] == 'dashboard create-self-signed-cert':
+        if cmd['prefix'] == 'dashboard create-self-signed-cert':
             self.create_self_signed_cert()
             return 0, 'Self-signed certificate created', ''
 
@@ -377,7 +377,7 @@ class Module(MgrModule, CherryPyConfig):
 
     def get_updated_pool_stats(self):
         df = self.get('df')
-        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
+        pool_stats = {p['id']: p['stats'] for p in df['pools']}
         now = time.time()
         for pool_id, stats in pool_stats.items():
             for stat_name, stat_val in stats.items():
diff --git a/src/pybind/mgr/dashboard/services/access_control.py b/src/pybind/mgr/dashboard/services/access_control.py
index 52b87a4aa789c3868c0238515b43c4618ffc8f93..812d7a6c91f76ada99a32541f241c4c68d356871 100644 (file)
@@ -93,17 +93,17 @@ class Role(object):
 # this roles cannot be deleted nor updated
 
 # admin role provides all permissions for all scopes
-ADMIN_ROLE = Role('administrator', 'Administrator', dict([
-    (scope_name, Permission.all_permissions())
+ADMIN_ROLE = Role('administrator', 'Administrator', {
+    scope_name: Permission.all_permissions()
     for scope_name in Scope.all_scopes()
-]))
+})
 
 
 # read-only role provides read-only permission for all scopes
-READ_ONLY_ROLE = Role('read-only', 'Read-Only', dict([
-    (scope_name, [_P.READ]) for scope_name in Scope.all_scopes()
+READ_ONLY_ROLE = Role('read-only', 'Read-Only', {
+    scope_name: [_P.READ] for scope_name in Scope.all_scopes()
     if scope_name != Scope.DASHBOARD_SETTINGS
-]))
+})
 
 
 # block manager role provides all permission for block related scopes
@@ -227,7 +227,7 @@ class User(object):
     @classmethod
     def from_dict(cls, u_dict, roles):
         return User(u_dict['username'], u_dict['password'], u_dict['name'],
-                    u_dict['email'], set([roles[r] for r in u_dict['roles']]),
+                    u_dict['email'], {roles[r] for r in u_dict['roles']},
                     u_dict['lastUpdate'])
 
 
@@ -300,8 +300,8 @@ class AccessControlDB(object):
     def save(self):
         with self.lock:
             db = {
-                'users': dict([(un, u.to_dict()) for un, u in self.users.items()]),
-                'roles': dict([(rn, r.to_dict()) for rn, r in self.roles.items()]),
+                'users': {un: u.to_dict() for un, u in self.users.items()},
+                'roles': {rn: r.to_dict() for rn, r in self.roles.items()},
                 'version': self.version
             }
             mgr.set_store(self.accessdb_config_key(), json.dumps(db))
@@ -343,10 +343,10 @@ class AccessControlDB(object):
             return db
 
         db = json.loads(json_db)
-        roles = dict([(rn, Role.from_dict(r))
-                      for rn, r in db.get('roles', {}).items()])
-        users = dict([(un, User.from_dict(u, dict(roles, **SYSTEM_ROLES)))
-                      for un, u in db.get('users', {}).items()])
+        roles = {rn: Role.from_dict(r)
+                 for rn, r in db.get('roles', {}).items()}
+        users = {un: User.from_dict(u, dict(roles, **SYSTEM_ROLES))
+                 for un, u in db.get('users', {}).items()}
         return cls(db['version'], users, roles)
 
 
diff --git a/src/pybind/mgr/dashboard/services/ceph_service.py b/src/pybind/mgr/dashboard/services/ceph_service.py
index ee536ac363dcc8fc0d8318bd99a2aa5f2e7f0330..a1905afbfd80380e35d2bc3aa1cfaf44c76a3f6a 100644 (file)
@@ -176,7 +176,7 @@ class CephService(object):
         data = mgr.get_counter(svc_type, svc_name, path)[path]
         if not data:
             return [(0, 0.0)]
-        elif len(data) == 1:
+        if len(data) == 1:
             return [(data[0][0], 0.0)]
         return [(data2[0], differentiate(data1, data2)) for data1, data2 in pairwise(data)]
 
diff --git a/src/pybind/mgr/dashboard/services/rgw_client.py b/src/pybind/mgr/dashboard/services/rgw_client.py
index c4728c5855778a7e5ed839d8ab9a723ec35d9141..324f9964fde167ff458d8b00ff5fb716be193d0f 100644 (file)
@@ -402,8 +402,8 @@ class RgwClient(RestClient):
         except RequestException as e:
             if e.status_code == 404:
                 return False
-            else:
-                raise e
+
+            raise e
 
     @RestClient.api_put('/{bucket_name}')
     def create_bucket(self, bucket_name, request=None):
diff --git a/src/pybind/mgr/dashboard/services/tcmu_service.py b/src/pybind/mgr/dashboard/services/tcmu_service.py
index 9abc5e46e4ec07e8bf120033f87f24c4ad5c730c..02f6558580897412638f4a2ebd0ba474fe94dc59 100644 (file)
@@ -49,8 +49,8 @@ class TcmuService(object):
                     name=metadata['image_name'])
                 perf_key = "{}lock_acquired_time".format(perf_key_prefix)
                 lock_acquired_time = (mgr.get_counter(
-                    'tcmu-runner', service_id, perf_key)[perf_key] or
-                                      [[0, 0]])[-1][1] / 1000000000
+                    'tcmu-runner', service_id, perf_key)[perf_key]
+                                      or [[0, 0]])[-1][1] / 1000000000
                 if lock_acquired_time > image.get('optimized_since', 0):
                     image['optimized_daemon'] = hostname
                     image['optimized_since'] = lock_acquired_time
diff --git a/src/pybind/mgr/dashboard/tests/test_tcmu_iscsi.py b/src/pybind/mgr/dashboard/tests/test_tcmu_iscsi.py
index d189c12ba2948db4a9ebb099be603ad0a63946af..06faae613e50cf004d65b8a5f0d29cbab4b290cc 100644 (file)
@@ -55,7 +55,7 @@ mocked_get_counter2 = {
 def _get_counter(_daemon_type, daemon_name, _stat):
     if daemon_name == 'ceph-dev1:pool1/image1':
         return mocked_get_counter1
-    elif daemon_name == 'ceph-dev2:pool1/image1':
+    if daemon_name == 'ceph-dev2:pool1/image1':
         return mocked_get_counter2
     return Exception('invalid daemon name')
 
diff --git a/src/pybind/mgr/dashboard/tools.py b/src/pybind/mgr/dashboard/tools.py
index 5d5561076a1632018137a9c1a6713e0107d5b8ef..c66a4a463cd3f4ee25c417d1d753d81cf65eb85c 100644 (file)
@@ -225,7 +225,7 @@ class ViewCache(object):
                         # pylint: disable=raising-bad-type
                         raise self.exception
                     return ViewCache.VALUE_OK, self.value
-                elif self.value_when is not None:
+                if self.value_when is not None:
                     # We have some data, but it doesn't meet freshness requirements
                     return ViewCache.VALUE_STALE, self.value
                 # We have no data, not even stale data
@@ -832,6 +832,7 @@ def getargspec(func):
             func = func.__wrapped__
     except AttributeError:
         pass
+    # pylint: disable=deprecated-method
     return _getargspec(func)