mgr/nfs: flake8
author    Sebastian Wagner <sewagner@redhat.com>
          Wed, 2 Jun 2021 14:04:21 +0000 (16:04 +0200)
committer Sage Weil <sage@newdream.net>
          Thu, 17 Jun 2021 20:19:50 +0000 (16:19 -0400)
Signed-off-by: Sebastian Wagner <sewagner@redhat.com>
src/pybind/mgr/nfs/__init__.py
src/pybind/mgr/nfs/cluster.py
src/pybind/mgr/nfs/export.py
src/pybind/mgr/nfs/export_utils.py
src/pybind/mgr/nfs/module.py
src/pybind/mgr/nfs/tests/test_nfs.py
src/pybind/mgr/nfs/utils.py
src/pybind/mgr/tox.ini

diff --git a/src/pybind/mgr/nfs/__init__.py b/src/pybind/mgr/nfs/__init__.py
index 82a8bca5a94504ecac15f554f96e5a2522a65641..4e22577887e8b1ac39fbdc082f37153609ebaccd 100644
@@ -1,3 +1,5 @@
+# flake8: noqa
+
 import os
 if 'UNITTEST' in os.environ:
     import tests
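
The new "# flake8: noqa" marker suppresses every flake8 check for the whole
file, which is why the conditional test import above needs no per-line
waiver. A minimal sketch of the two suppression forms (the second import is
illustrative only):

    # flake8: noqa
    #
    # File-level marker: flake8 skips this entire file, silencing the F401
    # ("imported but unused") that the conditional import would trigger.
    import os
    if 'UNITTEST' in os.environ:
        import tests

    # A trailing marker, by contrast, waives checks on one line only:
    import sys  # noqa: F401
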
diff --git a/src/pybind/mgr/nfs/cluster.py b/src/pybind/mgr/nfs/cluster.py
index d2d9e74b46f08ed38fc96002412ea23f79974af6..92c528957790bf5542a52c068bbdb1de993c7d36 100644
@@ -1,7 +1,7 @@
 import logging
-import socket
 import json
 import re
+import socket
 from typing import cast, Dict, List, Any, Union, Optional, TypeVar, Callable, TYPE_CHECKING, Tuple
 
 from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec, IngressSpec
@@ -233,7 +233,7 @@ class NFSCluster:
             raise ClusterNotFound()
         except NotImplementedError:
             return 0, "NFS-Ganesha Config Added Successfully "\
-                    "(Manual Restart of NFS PODS required)", ""
+                "(Manual Restart of NFS PODS required)", ""
         except Exception as e:
             return exception_handler(e, f"Setting NFS-Ganesha Config failed for {cluster_id}")
 
@@ -251,6 +251,6 @@ class NFSCluster:
             raise ClusterNotFound()
         except NotImplementedError:
             return 0, "NFS-Ganesha Config Removed Successfully "\
-                    "(Manual Restart of NFS PODS required)", ""
+                "(Manual Restart of NFS PODS required)", ""
         except Exception as e:
             return exception_handler(e, f"Resetting NFS-Ganesha Config failed for {cluster_id}")
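
The cluster.py hunks are pure re-indentation: flake8's continuation-line
checks (the E12x family) want the wrapped half of the implicitly
concatenated string at a plain hanging indent rather than aligned deeper
into the line. A sketch of the accepted form, with a placeholder function
name:

    def _set_config_result() -> tuple:
        # Hanging indent: the continuation sits one indent level in from
        # "return", instead of being pushed under the first fragment.
        return 0, "NFS-Ganesha Config Added Successfully " \
            "(Manual Restart of NFS PODS required)", ""
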
diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index 924575927c3504a4d892e75a4690b126509cfd6e..44814663a62853b59cf75d45433277d8b237151e 100644
@@ -2,14 +2,14 @@ import errno
 import json
 import logging
 from typing import List, Any, Dict, Tuple, Optional, TYPE_CHECKING, TypeVar, Callable, cast
-from os.path import isabs, normpath
+from os.path import normpath
 
 from rados import TimedOut, ObjectNotFound
 
 from .export_utils import GaneshaConfParser, Export, RawBlock
 from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \
-        ClusterNotFound
-from .utils import POOL_NAME, available_clusters, restart_nfs_service, check_fs
+    ClusterNotFound
+from .utils import POOL_NAME, available_clusters, check_fs
 
 if TYPE_CHECKING:
     from nfs.module import Module
@@ -112,7 +112,7 @@ class ExportMgr:
         try:
             ioctx.notify(obj)
         except TimedOut:
-            log.exception(f"Ganesha timed out")
+            log.exception("Ganesha timed out")
 
     @property
     def exports(self) -> Dict[str, List[Export]]:
@@ -140,7 +140,7 @@ class ExportMgr:
         self.mgr.check_mon_command({
             'prefix': 'auth rm',
             'entity': 'client.{}'.format(entity),
-            })
+        })
         log.info(f"Export user deleted is {entity}")
 
     def _gen_export_id(self) -> int:
@@ -188,7 +188,7 @@ class ExportMgr:
             if export:
                 if pseudo_path:
                     NFSRados(self.mgr, self.rados_namespace).remove_obj(
-                             f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
+                        f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
                 self.exports[cluster_id].remove(export)
                 self._delete_user(export.fsal.user_id)
                 if not self.exports[cluster_id]:
@@ -204,7 +204,7 @@ class ExportMgr:
             with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                 ioctx.set_namespace(self.rados_namespace)
                 export = Export.from_export_block(GaneshaConfParser(ioctx.read(f"export-{ex_id}"
-                    ).decode("utf-8")).parse()[0], self.rados_namespace)
+                                                                               ).decode("utf-8")).parse()[0], self.rados_namespace)
                 return export
         except ObjectNotFound:
             log.exception(f"Export ID: {ex_id} not found")
@@ -214,8 +214,8 @@ class ExportMgr:
         assert self.rados_namespace
         self.exports[self.rados_namespace].append(export)
         NFSRados(self.mgr, self.rados_namespace).update_obj(
-                GaneshaConfParser.write_block(export.to_export_block()),
-                f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')
+            GaneshaConfParser.write_block(export.to_export_block()),
+            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')
 
     def format_path(self, path: str) -> str:
         if path:
@@ -311,7 +311,7 @@ class FSExport(ExportMgr):
 
     def _update_user_id(self, path: str, access_type: str, fs_name: str, user_id: str) -> None:
         osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
-                self.rados_pool, self.rados_namespace, fs_name)
+            self.rados_pool, self.rados_namespace, fs_name)
         access_type = 'r' if access_type == 'RO' else 'rw'
 
         self.mgr.check_mon_command({
@@ -319,13 +319,13 @@ class FSExport(ExportMgr):
             'entity': f'client.{user_id}',
             'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                 access_type, path)],
-            })
+        })
 
         log.info(f"Export user updated {user_id}")
 
     def _create_user_key(self, entity: str, path: str, fs_name: str, fs_ro: bool) -> Tuple[str, str]:
         osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
-                self.rados_pool, self.rados_namespace, fs_name)
+            self.rados_pool, self.rados_namespace, fs_name)
         access_type = 'r' if fs_ro else 'rw'
 
         ret, out, err = self.mgr.check_mon_command({
@@ -334,7 +334,7 @@ class FSExport(ExportMgr):
             'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                 access_type, path)],
             'format': 'json',
-            })
+        })
 
         json_res = json.loads(out)
         log.info("Export user created is {}".format(json_res[0]['entity']))
@@ -396,7 +396,7 @@ class FSExport(ExportMgr):
                           pseudo_path: str,
                           read_only: bool,
                           squash: str,
-                          clients: list=[]) -> Tuple[int, str, str]:
+                          clients: list = []) -> Tuple[int, str, str]:
         pseudo_path = self.format_path(pseudo_path)
 
         if cluster_id not in self.exports:
@@ -404,7 +404,7 @@ class FSExport(ExportMgr):
 
         if not self._fetch_export(cluster_id, pseudo_path):
             # generate access+secret keys
-            
+
             ex_id = self._gen_export_id()
             if clients:
                 access_type = "none"
@@ -420,9 +420,9 @@ class FSExport(ExportMgr):
                 'squash': squash,
                 'fsal': {
                     "name": "RGW",
-                    #"user_id": user_id,
-                    #"access_key_id": access_key_id,
-                    #"secret_access_key": secret_access_key,
+                    # "user_id": user_id,
+                    # "access_key_id": access_key_id,
+                    # "secret_access_key": secret_access_key,
                 },
                 'clients': clients
             }
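
Two of the export.py fixes go beyond whitespace: f"Ganesha timed out" had no
placeholders, which flake8 reports as F541 (f-string without any
placeholders), and the dropped isabs/restart_nfs_service imports were F401
violations. A minimal sketch of the f-string rule (the helper name is
illustrative):

    import logging

    log = logging.getLogger(__name__)

    def audit(entity: str) -> None:
        # No interpolation fields, so a plain string is correct here.
        log.exception("Ganesha timed out")
        # The f-prefix is justified once a placeholder appears.
        log.info(f"Export user deleted is {entity}")
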
diff --git a/src/pybind/mgr/nfs/export_utils.py b/src/pybind/mgr/nfs/export_utils.py
index 620e0db239def65ca7ccef5973be5d9312d775c7..8a8a09b5cc7e65d46471fcec5b9934bfa2b1909a 100644
@@ -53,7 +53,7 @@ class GaneshaConfParser:
         if idx == -1:
             raise Exception(f"Cannot find block name at {self.last_context()}")
         block_name = self.stream()[:idx]
-        self.pos += idx+1
+        self.pos += idx + 1
         return block_name
 
     def parse_block_or_section(self) -> RawBlock:
@@ -66,7 +66,7 @@ class GaneshaConfParser:
                 self.pos += len(value)
             else:
                 value = self.stream()[:idx]
-                self.pos += idx+1
+                self.pos += idx + 1
             block_dict = RawBlock('%url', values={'value': value})
             return block_dict
 
@@ -98,9 +98,9 @@ class GaneshaConfParser:
             raise Exception("Malformed stanza: no equal symbol found.")
         semicolon_idx = self.stream().find(';')
         parameter_name = self.stream()[:equal_idx].lower()
-        parameter_value = self.stream()[equal_idx+1:semicolon_idx]
+        parameter_value = self.stream()[equal_idx + 1:semicolon_idx]
         block_dict.values[parameter_name] = self.parse_parameter_value(parameter_value)
-        self.pos += semicolon_idx+1
+        self.pos += semicolon_idx + 1
 
     def parse_block_body(self, block_dict: RawBlock) -> None:
         while True:
@@ -117,8 +117,8 @@ class GaneshaConfParser:
 
             if is_semicolon and ((is_lbracket and is_semicolon_lt_lbracket) or not is_lbracket):
                 self.parse_stanza(block_dict)
-            elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket) or
-                                  (not is_semicolon)):
+            elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
+                                  or (not is_semicolon)):
                 block_dict.blocks.append(self.parse_block_or_section())
             else:
                 raise Exception("Malformed stanza: no semicolon found.")
@@ -135,7 +135,7 @@ class GaneshaConfParser:
     @staticmethod
     def _indentation(depth: int, size: int = 4) -> str:
         conf_str = ""
-        for _ in range(0, depth*size):
+        for _ in range(0, depth * size):
             conf_str += " "
         return conf_str
 
@@ -170,7 +170,7 @@ class GaneshaConfParser:
         conf_str += GaneshaConfParser._indentation(depth)
         conf_str += format(block.block_name)
         conf_str += " {\n"
-        conf_str += GaneshaConfParser.write_block_body(block, depth+1)
+        conf_str += GaneshaConfParser.write_block_body(block, depth + 1)
         conf_str += GaneshaConfParser._indentation(depth)
         conf_str += "}\n"
         return conf_str
@@ -197,10 +197,10 @@ class FSAL(object):
         raise NFSInvalidOperation(f'Unknown FSAL {fsal_block.values.get("name")}')
 
     def to_fsal_block(self) -> RawBlock:
-        raise NotImplemented
+        raise NotImplementedError
 
     def to_dict(self) -> Dict[str, Any]:
-        raise NotImplemented
+        raise NotImplementedError
 
 
 class CephFSFSAL(FSAL):
@@ -502,7 +502,7 @@ class Export:
             if not fs.fs_name or not check_fs(mgr, fs.fs_name):
                 raise FSNotFound(fs.fs_name)
         elif self.fsal.name == 'RGW':
-            rgw = cast(RGWFSAL, self.fsal)
+            rgw = cast(RGWFSAL, self.fsal)  # noqa
             pass
         else:
             raise NFSInvalidOperation('FSAL {self.fsal.name} not supported')
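
The NotImplemented change in export_utils.py is a genuine bug fix that
flake8 surfaces as F901: NotImplemented is a constant meant as a return
value for binary special methods, not an exception, so raising it fails at
runtime with "TypeError: exceptions must derive from BaseException". A
minimal sketch (class names are illustrative):

    class FSALBase:
        def to_dict(self) -> dict:
            # Correct: NotImplementedError is a real exception subclass,
            # the conventional signal for a method subclasses must override.
            raise NotImplementedError

    class Broken:
        def to_dict(self) -> dict:
            # Wrong: raising the NotImplemented constant is itself a
            # TypeError in Python 3.
            raise NotImplemented
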
diff --git a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py
index 032b1bf7c220d6cb582e28cc352459d9a6f958ca..ca4901e7295a4a5967571083fa59a3aadf272134 100644
@@ -111,9 +111,9 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     @CLICommand('nfs cluster create', perm='rw')
     def _cmd_nfs_cluster_create(self,
                                 clusterid: str,
-                                placement: Optional[str]=None,
-                                ingress: Optional[bool]=None,
-                                virtual_ip: Optional[str]=None) -> Tuple[int, str, str]:
+                                placement: Optional[str] = None,
+                                ingress: Optional[bool] = None,
+                                virtual_ip: Optional[str] = None) -> Tuple[int, str, str]:
         """Create an NFS Cluster"""
         return self.nfs.create_nfs_cluster(cluster_id=clusterid, placement=placement,
                                            virtual_ip=virtual_ip, ingress=ingress)
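
The module.py hunk adds spaces around "=" in the annotated keyword defaults.
pycodestyle distinguishes the two cases: with a type annotation the default
takes spaces (E252 otherwise), while an unannotated default keeps the tight
form (E251 otherwise). A short sketch with placeholder signatures:

    from typing import Optional, Tuple

    # Annotated parameter: PEP 8 wants spaces around the "=".
    def create(placement: Optional[str] = None) -> Tuple[int, str, str]:
        return 0, "", ""

    # Unannotated parameter: the tight form is the accepted one.
    def legacy(placement=None):
        return placement
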
diff --git a/src/pybind/mgr/nfs/tests/test_nfs.py b/src/pybind/mgr/nfs/tests/test_nfs.py
index a54e317880b598cfad97dcebf5ee9b09c16c6544..2d2b8e4e518c6f7e75c1d302fba836bb1e5b37cb 100644
@@ -1,3 +1,5 @@
+# flake8: noqa
+
 from typing import Optional, Tuple, Iterator, List, Any, Dict
 
 from contextlib import contextmanager
@@ -131,7 +133,6 @@ EXPORT
         }
     }
 
-
     class RObject(object):
         def __init__(self, key: str, raw: str) -> None:
             self.key = key
@@ -224,8 +225,7 @@ EXPORT
         """
 
         with mock.patch('nfs.module.Module.describe_service') as describe_service, \
-             mock.patch('nfs.module.Module.rados') as rados:
-
+                mock.patch('nfs.module.Module.rados') as rados:
 
             rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
             rados.open_ioctx.return_value.__exit__ = mock.Mock(return_value=None)
@@ -284,7 +284,7 @@ EXPORT
         assert export.fsal.sec_label_xattr == None
         assert len(export.clients) == 2
         assert export.clients[0].addresses == \
-               ["192.168.0.10", "192.168.1.0/8"]
+            ["192.168.0.10", "192.168.1.0/8"]
         # assert export.clients[0].squash ==  "no_root_squash"  # probably correct value
         assert export.clients[0].squash == "None"
         assert export.clients[0].access_type is None
@@ -315,9 +315,9 @@ EXPORT
         assert export.protocols == [4, 3]
         assert set(export.transports) == {"TCP", "UDP"}
         assert export.fsal.name == "RGW"
-        #assert export.fsal.rgw_user_id == "testuser"  # probably correct value
-        #assert export.fsal.access_key == "access_key"  # probably correct value
-        #assert export.fsal.secret_key == "secret_key"  # probably correct value
+        # assert export.fsal.rgw_user_id == "testuser"  # probably correct value
+        # assert export.fsal.access_key == "access_key"  # probably correct value
+        # assert export.fsal.secret_key == "secret_key"  # probably correct value
         assert len(export.clients) == 0
         assert export.cluster_id in ('_default_', 'foo')
 
@@ -328,7 +328,6 @@ EXPORT
         export = Export.from_export_block(blocks[0], '_default_')
         self._validate_export_2(export)
 
-
     def test_daemon_conf_parser_a(self) -> None:
         blocks = GaneshaConfParser(self.conf_nodea).parse()
         assert isinstance(blocks, list)
@@ -363,7 +362,6 @@ EXPORT
         self._validate_export_1([e for e in exports if e.export_id == 1][0])
         self._validate_export_2([e for e in exports if e.export_id == 2][0])
 
-
     def test_config_dict(self) -> None:
         with self._mock_orchestrator(True):
             for cluster_id, info in self.clusters.items():
@@ -377,37 +375,37 @@ EXPORT
         ex_dict = export.to_dict()
 
         assert ex_dict == {'access_type': 'RW',
-             'clients': [{'access_type': None,
-                          'addresses': ['192.168.0.10', '192.168.1.0/8'],
-                          'squash': 'None'},
-                         {'access_type': 'RO',
-                          'addresses': ['192.168.0.0/16'],
-                          'squash': 'All'}],
-             'cluster_id': 'foo',
-             'export_id': 1,
-             'fsal': {'fs_name': 'a', 'name': 'CEPH', 'user_id': 'ganesha'},
-             'path': '/',
-             'protocols': [4],
-             'pseudo': '/cephfs_a/',
-             'security_label': True,
-             'squash': 'no_root_squash',
-             'transports': [None]}
+                           'clients': [{'access_type': None,
+                                        'addresses': ['192.168.0.10', '192.168.1.0/8'],
+                                        'squash': 'None'},
+                                       {'access_type': 'RO',
+                                        'addresses': ['192.168.0.0/16'],
+                                        'squash': 'All'}],
+                           'cluster_id': 'foo',
+                           'export_id': 1,
+                           'fsal': {'fs_name': 'a', 'name': 'CEPH', 'user_id': 'ganesha'},
+                           'path': '/',
+                           'protocols': [4],
+                           'pseudo': '/cephfs_a/',
+                           'security_label': True,
+                           'squash': 'no_root_squash',
+                           'transports': [None]}
 
         export = [e for e in conf.exports['foo'] if e.export_id == 2][0]
         ex_dict = export.to_dict()
         assert ex_dict == {'access_type': 'RW',
-             'clients': [],
-             'cluster_id': 'foo',
-             'export_id': 2,
-             'fsal': {'name': 'RGW',
-                      'secret_access_key': 'secret_key',
-                      'user_id': 'testuser'},
-             'path': '/',
-             'protocols': [3, 4],
-             'pseudo': '/rgw',
-             'security_label': True,
-             'squash': 'AllAnonymous',
-             'transports': ['TCP', 'UDP']}
+                           'clients': [],
+                           'cluster_id': 'foo',
+                           'export_id': 2,
+                           'fsal': {'name': 'RGW',
+                                    'secret_access_key': 'secret_key',
+                                    'user_id': 'testuser'},
+                           'path': '/',
+                           'protocols': [3, 4],
+                           'pseudo': '/rgw',
+                           'security_label': True,
+                           'squash': 'AllAnonymous',
+                           'transports': ['TCP', 'UDP']}
 
     def test_config_from_dict(self) -> None:
         with self._mock_orchestrator(True):
@@ -459,7 +457,7 @@ EXPORT
         assert export.fsal.sec_label_xattr == 'security.selinux'
         assert len(export.clients) == 2
         assert export.clients[0].addresses == \
-                         ["192.168.0.10", "192.168.1.0/8"]
+            ["192.168.0.10", "192.168.1.0/8"]
         assert export.clients[0].squash == "no_root_squash"
         assert export.clients[0].access_type is None
         assert export.clients[1].addresses == ["192.168.0.0/16"]
@@ -565,7 +563,7 @@ EXPORT
         assert export.daemons == set(expected_exports[2])
         assert export.cluster_id == cluster_id
     """
-    
+
     def test_remove_export(self) -> None:
         with self._mock_orchestrator(True):
             for cluster_id, info in self.clusters.items():
@@ -576,7 +574,8 @@ EXPORT
         nfs_mod = Module('nfs', '', '')
         conf = ExportMgr(nfs_mod)
         assert len(conf.exports[cluster_id]) == 2
-        assert conf.delete_export(cluster_id=cluster_id, pseudo_path="/rgw") == (0, "Successfully deleted export", "")
+        assert conf.delete_export(cluster_id=cluster_id,
+                                  pseudo_path="/rgw") == (0, "Successfully deleted export", "")
         exports = conf.exports[cluster_id]
         assert len(exports) == 1
         assert exports[0].export_id == 1
@@ -719,7 +718,7 @@ EXPORT
         conf.reload_daemons(['nodea', 'nodeb'])
         self.io_mock.notify.assert_has_calls(calls)
     """
-    
+
     """
     def test_list_daemons(self):
         for cluster_id, info in self.clusters.items():
diff --git a/src/pybind/mgr/nfs/utils.py b/src/pybind/mgr/nfs/utils.py
index ece47e3af139d71b16e8d017486ea570a62fc273..da7ca690b82e2e9243528f744ee2f12e273c260c 100644
@@ -30,7 +30,7 @@ def restart_nfs_service(mgr: 'Module', cluster_id: str) -> None:
     This methods restarts the nfs daemons
     '''
     completion = mgr.service_action(action='restart',
-                                    service_name='nfs.'+cluster_id)
+                                    service_name='nfs.' + cluster_id)
     orchestrator.raise_if_exception(completion)
 
 
diff --git a/src/pybind/mgr/tox.ini b/src/pybind/mgr/tox.ini
index 1d7c7d9b580b23641ae3f9092c833f0acbe20cb1..660062d2fd689cfaeb32ec111d5d94ec5043f94b 100644
@@ -120,6 +120,7 @@ modules =
     diskprediction_local
     insights
     iostat
+    nfs
     orchestrator
     prometheus
     status
@@ -143,6 +144,7 @@ modules =
     hello
     iostat
     localpool
+    nfs
     orchestrator
     prometheus
     selftest
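
With nfs added to both module lists, tox.ini now routes the module through
the tree's lint runs. For checking a tree locally, flake8 also ships a
documented legacy Python API; a minimal sketch, where the path and
line-length value are assumptions rather than values from this commit:

    from flake8.api import legacy as flake8

    # Lint the nfs module tree and report the violation count.
    style_guide = flake8.get_style_guide(max_line_length=100)
    report = style_guide.check_files(['src/pybind/mgr/nfs'])
    print(report.total_errors)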