git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: use python logging pkg with named loggers
author Ricardo Dias <rdias@suse.com>
Fri, 27 Sep 2019 09:55:52 +0000 (10:55 +0100)
committer Ricardo Dias <rdias@suse.com>
Tue, 12 Nov 2019 11:50:50 +0000 (11:50 +0000)
Signed-off-by: Ricardo Dias <rdias@suse.com>
28 files changed:
src/pybind/mgr/dashboard/__init__.py
src/pybind/mgr/dashboard/controllers/__init__.py
src/pybind/mgr/dashboard/controllers/auth.py
src/pybind/mgr/dashboard/controllers/docs.py
src/pybind/mgr/dashboard/controllers/frontend_logging.py [new file with mode: 0644]
src/pybind/mgr/dashboard/controllers/home.py
src/pybind/mgr/dashboard/controllers/logging.py [deleted file]
src/pybind/mgr/dashboard/controllers/nfsganesha.py
src/pybind/mgr/dashboard/controllers/osd.py
src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
src/pybind/mgr/dashboard/controllers/rgw.py
src/pybind/mgr/dashboard/grafana.py
src/pybind/mgr/dashboard/module.py
src/pybind/mgr/dashboard/plugins/interfaces.py
src/pybind/mgr/dashboard/rest_client.py
src/pybind/mgr/dashboard/services/access_control.py
src/pybind/mgr/dashboard/services/auth.py
src/pybind/mgr/dashboard/services/ceph_service.py
src/pybind/mgr/dashboard/services/cephfs.py
src/pybind/mgr/dashboard/services/exception.py
src/pybind/mgr/dashboard/services/ganesha.py
src/pybind/mgr/dashboard/services/iscsi_client.py
src/pybind/mgr/dashboard/services/orchestrator.py
src/pybind/mgr/dashboard/services/progress.py
src/pybind/mgr/dashboard/services/rgw_client.py
src/pybind/mgr/dashboard/services/sso.py
src/pybind/mgr/dashboard/tests/__init__.py
src/pybind/mgr/dashboard/tools.py

index a1affa2ef5888b835d4c35ac9151b1844741ea11..cd1b39d4e525271fc1275f95bb3db4886691af1b 100644 (file)
@@ -9,23 +9,12 @@ import os
 
 
 if 'UNITTEST' not in os.environ:
-    class _LoggerProxy(object):
-        def __init__(self):
-            self._logger = None
-
-        def __getattr__(self, item):
-            if self._logger is None:
-                raise AttributeError("logger not initialized")
-            return getattr(self._logger, item)
-
     class _ModuleProxy(object):
         def __init__(self):
             self._mgr = None
 
         def init(self, module_inst):
-            global logger
             self._mgr = module_inst
-            logger._logger = self._mgr._logger
 
         def __getattr__(self, item):
             if self._mgr is None:
@@ -33,14 +22,12 @@ if 'UNITTEST' not in os.environ:
             return getattr(self._mgr, item)
 
     mgr = _ModuleProxy()
-    logger = _LoggerProxy()
 
     # DO NOT REMOVE: required for ceph-mgr to load a module
     from .module import Module, StandbyModule  # noqa: F401
 else:
     import logging
     logging.basicConfig(level=logging.DEBUG)
-    logger = logging.getLogger(__name__)
     logging.root.handlers[0].setLevel(logging.DEBUG)
     os.environ['PATH'] = '{}:{}'.format(os.path.abspath('../../../../build/bin'),
                                         os.environ['PATH'])
index 04169b3997331791e0db4072b02ed4afe785ee04..348eed9ad2b3e5251f4b7fa750ec5e92fd75b3a5 100644 (file)
@@ -6,6 +6,7 @@ import collections
 import importlib
 import inspect
 import json
+import logging
 import os
 import pkgutil
 import re
@@ -17,7 +18,6 @@ from six.moves.urllib.parse import unquote
 # pylint: disable=wrong-import-position
 import cherrypy
 
-from .. import logger
 from ..security import Scope, Permission
 from ..tools import wraps, getargspec, TaskManager, get_request_body_params
 from ..exceptions import ScopeNotValid, PermissionNotValid
@@ -133,8 +133,6 @@ class ControllerDoc(object):
 class Controller(object):
     def __init__(self, path, base_url=None, security_scope=None, secure=True):
         if security_scope and not Scope.valid_scope(security_scope):
-            logger.debug("Invalid security scope name: %s\n Possible values: "
-                         "%s", security_scope, Scope.all_scopes())
             raise ScopeNotValid(security_scope)
         self.path = path
         self.base_url = base_url
@@ -254,19 +252,20 @@ def Proxy(path=None):  # noqa: N802
 
 
 def load_controllers():
+    logger = logging.getLogger('controller.load')
     # setting sys.path properly when not running under the mgr
     controllers_dir = os.path.dirname(os.path.realpath(__file__))
     dashboard_dir = os.path.dirname(controllers_dir)
     mgr_dir = os.path.dirname(dashboard_dir)
-    logger.debug("LC: controllers_dir=%s", controllers_dir)
-    logger.debug("LC: dashboard_dir=%s", dashboard_dir)
-    logger.debug("LC: mgr_dir=%s", mgr_dir)
+    logger.debug("controllers_dir=%s", controllers_dir)
+    logger.debug("dashboard_dir=%s", dashboard_dir)
+    logger.debug("mgr_dir=%s", mgr_dir)
     if mgr_dir not in sys.path:
         sys.path.append(mgr_dir)
 
     controllers = []
     mods = [mod for _, mod, _ in pkgutil.iter_modules([controllers_dir])]
-    logger.debug("LC: mods=%s", mods)
+    logger.debug("mods=%s", mods)
     for mod_name in mods:
         mod = importlib.import_module('.controllers.{}'.format(mod_name),
                                       package='dashboard')
@@ -320,6 +319,7 @@ def generate_controller_routes(endpoint, mapper, base_url):
 
     url = "{}{}".format(base_url, endp_url)
 
+    logger = logging.getLogger('controller')
     logger.debug("Mapped [%s] to %s:%s restricted to %s",
                  url, ctrl_class.__name__, endpoint.action,
                  endpoint.method)
@@ -357,6 +357,7 @@ def generate_routes(url_prefix):
         parent_urls.add(generate_controller_routes(endpoint, mapper,
                                                    "{}".format(url_prefix)))
 
+    logger = logging.getLogger('controller')
     logger.debug("list of parent paths: %s", parent_urls)
     return mapper, parent_urls
 
@@ -586,6 +587,7 @@ class BaseController(object):
                                                  self.action)
 
     def __init__(self):
+        logger = logging.getLogger('controller')
         logger.info('Initializing controller: %s -> %s',
                     self.__class__.__name__, self._cp_path_)
 
@@ -896,6 +898,7 @@ def _set_func_permissions(func, permissions):
 
     for perm in permissions:
         if not Permission.valid_permission(perm):
+            logger = logging.getLogger('controller.set_func_perms')
             logger.debug("Invalid security permission: %s\n "
                          "Possible values: %s", perm,
                          Permission.all_permissions())
index d98c1387c06262ec2f3a8070b454e1e94a9b4d64..4c4b1a6027ce55357a4f8162d6d03458277493d7 100644 (file)
@@ -1,14 +1,18 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
+import logging
 import cherrypy
 
 from . import ApiController, RESTController
-from .. import logger, mgr
+from .. import mgr
 from ..exceptions import DashboardException
 from ..services.auth import AuthManager, JwtManager
 
 
+logger = logging.getLogger('controllers.auth')
+
+
 @ApiController('/auth', secure=False)
 class Auth(RESTController):
     """
index ec5d3c10bc2aaac3c8a692ac352d34bec711a5ab..1203db74e7f9a93e869b37bb2154c3c0cf41e024 100644 (file)
@@ -1,14 +1,18 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
+import logging
 import cherrypy
 
 from . import Controller, BaseController, Endpoint, ENDPOINT_MAP
-from .. import logger, mgr
+from .. import mgr
 
 from ..tools import str_to_bool
 
 
+logger = logging.getLogger('controllers.docs')
+
+
 @Controller('/docs', secure=False)
 class Docs(BaseController):
 
@@ -314,7 +318,7 @@ class Docs(BaseController):
 
         host = cherrypy.request.base
         host = host[host.index(':')+3:]
-        logger.debug("DOCS: Host: %s", host)
+        logger.debug("Host: %s", host)
 
         paths = self._gen_paths(all_endpoints, base_url)
 
diff --git a/src/pybind/mgr/dashboard/controllers/frontend_logging.py b/src/pybind/mgr/dashboard/controllers/frontend_logging.py
new file mode 100644 (file)
index 0000000..910e39b
--- /dev/null
@@ -0,0 +1,16 @@
+from __future__ import absolute_import
+
+import logging
+
+from . import UiApiController, BaseController, Endpoint
+
+
+logger = logging.getLogger('frontend.error')
+
+
+@UiApiController('/logging', secure=False)
+class FrontendLogging(BaseController):
+
+    @Endpoint('POST', path='js-error')
+    def jsError(self, url, message, stack=None):  # noqa: N802
+        logger.error('(%s): %s\n %s\n', url, message, stack)
index df11340934c003f77edbc057e7411fedfc4d5092..2b05eaee02678de654d876683e09d46d0d6a4a5c 100644 (file)
@@ -4,6 +4,7 @@ from __future__ import absolute_import
 import os
 import re
 import json
+import logging
 try:
     from functools import lru_cache
 except ImportError:
@@ -13,7 +14,10 @@ import cherrypy
 from cherrypy.lib.static import serve_file
 
 from . import Controller, UiApiController, BaseController, Proxy, Endpoint
-from .. import mgr, logger
+from .. import mgr
+
+
+logger = logging.getLogger("controllers.home")
 
 
 LANGUAGES = {f for f in os.listdir(mgr.get_frontend_path())
diff --git a/src/pybind/mgr/dashboard/controllers/logging.py b/src/pybind/mgr/dashboard/controllers/logging.py
deleted file mode 100644 (file)
index 2cbced0..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-from . import UiApiController, BaseController, Endpoint
-from .. import logger
-
-
-@UiApiController('/logging', secure=False)
-class Logging(BaseController):
-
-    @Endpoint('POST', path='js-error')
-    def jsError(self, url, message, stack=None):  # noqa: N802
-        logger.error('frontend error (%s): %s\n %s\n', url, message, stack)
index 50f50b46d22c3f53403e01606262f1ae3a30e5ae..157b94d46d4bfb8200f71ec110d272fd41fe812e 100644 (file)
@@ -2,13 +2,13 @@
 from __future__ import absolute_import
 
 from functools import partial
+import logging
 
 import cherrypy
 import cephfs
 
 from . import ApiController, RESTController, UiApiController, BaseController, \
               Endpoint, Task, ReadPermission, ControllerDoc, EndpointDoc
-from .. import logger
 from ..security import Scope
 from ..services.cephfs import CephFS
 from ..services.cephx import CephX
@@ -17,6 +17,9 @@ from ..services.ganesha import Ganesha, GaneshaConf, NFSException
 from ..services.rgw_client import RgwClient
 
 
+logger = logging.getLogger('controllers.ganesha')
+
+
 # documentation helpers
 EXPORT_SCHEMA = {
     'export_id': (int, 'Export ID'),
@@ -282,7 +285,7 @@ class NFSGaneshaUi(BaseController):
             root_dir = "/"
         depth = int(depth)
         if depth > 5:
-            logger.warning("[NFS] Limiting depth to maximum value of 5: "
+            logger.warning("Limiting depth to maximum value of 5: "
                            "input depth=%s", depth)
             depth = 5
         root_dir = '{}{}'.format(root_dir.rstrip('/'), '/')
index 2e412a98479f2483bd856e1d9f979aef81ec24fd..b36681b982058b709c7449333890155abbe2441e 100644 (file)
@@ -1,8 +1,9 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 import json
+import logging
 from . import ApiController, RESTController, Endpoint, ReadPermission, UpdatePermission
-from .. import mgr, logger
+from .. import mgr
 from ..security import Scope
 from ..services.ceph_service import CephService, SendCommandError
 from ..services.exception import handle_send_command_error
@@ -13,6 +14,9 @@ except ImportError:
     pass  # For typing only
 
 
+logger = logging.getLogger('controllers.osd')
+
+
 @ApiController('/osd', Scope.OSD)
 class Osd(RESTController):
     def list(self):
@@ -83,7 +87,7 @@ class Osd(RESTController):
                     for _, dev_data in dev_smart_data.items():
                         if 'error' in dev_data:
                             logger.warning(
-                                '[OSD] Error retrieving smartctl data for device ID %s: %s', dev_id,
+                                'Error retrieving smartctl data for device ID %s: %s', dev_id,
                                 dev_smart_data)
                     smart_data.update(dev_smart_data)
         return smart_data
index 1d0a360b497f7487f8eb5e4c803a9bc6fc050569..2fd4c607f5e35868627263cc5eb7aa6f93ec191e 100644 (file)
@@ -3,6 +3,7 @@ from __future__ import absolute_import
 
 import json
 import re
+import logging
 
 from functools import partial
 
@@ -14,7 +15,7 @@ from . import ApiController, Endpoint, Task, BaseController, ReadPermission, \
     RESTController
 from .rbd import _rbd_call
 
-from .. import logger, mgr
+from .. import mgr
 from ..security import Scope
 from ..services.ceph_service import CephService
 from ..tools import ViewCache
@@ -22,6 +23,9 @@ from ..services.exception import handle_rados_error, handle_rbd_error, \
     serialize_dashboard_exception
 
 
+logger = logging.getLogger('controllers.rbd_mirror')
+
+
 # pylint: disable=not-callable
 def handle_rbd_mirror_error():
     def composed_decorator(func):
index f743099c0c365f07fb2212f8dc6651cbe5717911..608093dea889261af250fadd2bbf06776e945c6e 100644 (file)
@@ -1,13 +1,13 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
+import logging
 import json
 
 import cherrypy
 
 from . import ApiController, BaseController, RESTController, Endpoint, \
     ReadPermission
-from .. import logger
 from ..exceptions import DashboardException
 from ..rest_client import RequestException
 from ..security import Scope
@@ -16,6 +16,9 @@ from ..services.rgw_client import RgwClient
 from ..tools import json_str_to_object
 
 
+logger = logging.getLogger('controllers.rgw')
+
+
 @ApiController('/rgw', Scope.RGW)
 class Rgw(BaseController):
 
index 1399dce3c4b2348e92ca7342b5975845002de2ea..aedad836bbb8b7b12cd279f0017a8894d9814222 100644 (file)
@@ -2,15 +2,18 @@
 from __future__ import absolute_import
 
 import json
+import logging
 import os
 import time
 import requests
 
-from . import logger
 from .exceptions import GrafanaError
 from .settings import Settings
 
 
+logger = logging.getLogger('grafana')
+
+
 class GrafanaRestClient(object):
 
     @staticmethod
index b9b68429e7d3f31d3d3f2ad28817bbb9454cf6f5..3d1cca32d15fa5f3927c62d140260d40ab4b4ddf 100644 (file)
@@ -6,6 +6,7 @@ from __future__ import absolute_import
 
 import collections
 import errno
+import logging
 import os
 import socket
 import tempfile
@@ -39,7 +40,7 @@ if 'COVERAGE_ENABLED' in os.environ:
     cherrypy.engine.subscribe('stop', __cov.stop)
 
 # pylint: disable=wrong-import-position
-from . import logger, mgr
+from . import mgr
 from .controllers import generate_routes, json_error_page
 from .grafana import push_local_dashboards
 from .tools import NotificationQueue, RequestLoggingTool, TaskManager, \
@@ -68,6 +69,9 @@ def os_exit_noop(*args):
 os._exit = os_exit_noop
 
 
+logger = logging.getLogger(__name__)
+
+
 class CherryPyConfig(object):
     """
     Class for common server configuration done by both active and
index ce449acfe01f1b1fbb7f50d8902a3aa4a14726ef..a08c3aac1c3f8d80178ae26c633e845c5a71772f 100644 (file)
@@ -10,8 +10,8 @@ class CanMgr(Mixin):
 
 
 class CanLog(Mixin):
-    from .. import logger
-    log = logger
+    import logging
+    log = logging.getLogger()
 
 
 class CanCherrypy(Mixin):
index d81b61c3a52478e70c71f5fb691e4a82a2520127..83d7fc6322f2e743681408ffaecb9183e5cb6d0f 100644 (file)
 """
 from __future__ import absolute_import
 
-from .settings import Settings
-from .tools import build_url
 import inspect
+import logging
 import re
 import requests
 from requests.exceptions import ConnectionError, InvalidURL, Timeout
-from . import logger
+from .settings import Settings
+from .tools import build_url
 
 try:
     from requests.packages.urllib3.exceptions import SSLError
@@ -27,6 +27,9 @@ except ImportError:
     from urllib3.exceptions import SSLError
 
 
+logger = logging.getLogger('rest_client')
+
+
 class TimeoutRequestsSession(requests.Session):
     """
     Set timeout argument for all requests if this is not already done.
index d3d127300829292ef2631dfb613d85875e147348..6bb58e1d54f5f82ef979f17ac8fcfa69c80f9370 100644 (file)
@@ -7,6 +7,7 @@ from string import punctuation, ascii_lowercase, digits, ascii_uppercase
 
 import errno
 import json
+import logging
 import threading
 import time
 import re
@@ -15,7 +16,7 @@ import bcrypt
 
 from mgr_module import CLIReadCommand, CLIWriteCommand
 
-from .. import mgr, logger
+from .. import mgr
 from ..security import Scope, Permission
 from ..exceptions import RoleAlreadyExists, RoleDoesNotExist, ScopeNotValid, \
                          PermissionNotValid, RoleIsAssociatedWithUser, \
@@ -23,6 +24,9 @@ from ..exceptions import RoleAlreadyExists, RoleDoesNotExist, ScopeNotValid, \
                          RoleNotInUser
 
 
+logger = logging.getLogger('access_control')
+
+
 # password hashing algorithm
 def password_hash(password, salt_password=None):
     if not password:
@@ -369,7 +373,7 @@ class AccessControlDB(object):
             del self.roles[name]
 
     def create_user(self, username, password, name, email, enabled=True):
-        logger.debug("AC: creating user: username=%s", username)
+        logger.debug("creating user: username=%s", username)
         with self.lock:
             if username in self.users:
                 raise UserAlreadyExists(username)
@@ -413,14 +417,14 @@ class AccessControlDB(object):
         return "{}{}".format(cls.ACDB_CONFIG_KEY, version)
 
     def check_and_update_db(self):
-        logger.debug("AC: Checking for previews DB versions")
+        logger.debug("Checking for previews DB versions")
 
         def check_migrate_v0_to_current():
             # check if there is username/password from previous version
             username = mgr.get_module_option('username', None)
             password = mgr.get_module_option('password', None)
             if username and password:
-                logger.debug("AC: Found single user credentials: user=%s", username)
+                logger.debug("Found single user credentials: user=%s", username)
                 # found user credentials
                 user = self.create_user(username, "", None, None)
                 # password is already hashed, so setting manually
@@ -432,7 +436,7 @@ class AccessControlDB(object):
             # Check if version 1 exists in the DB and migrate it to current version
             v1_db = mgr.get_store(self.accessdb_config_key(1))
             if v1_db:
-                logger.debug("AC: Found database v1 credentials")
+                logger.debug("Found database v1 credentials")
                 v1_db = json.loads(v1_db)
 
                 for user, _ in v1_db['users'].items():
@@ -451,11 +455,11 @@ class AccessControlDB(object):
 
     @classmethod
     def load(cls):
-        logger.info("AC: Loading user roles DB version=%s", cls.VERSION)
+        logger.info("Loading user roles DB version=%s", cls.VERSION)
 
         json_db = mgr.get_store(cls.accessdb_config_key())
         if json_db is None:
-            logger.debug("AC: No DB v%s found, creating new...", cls.VERSION)
+            logger.debug("No DB v%s found, creating new...", cls.VERSION)
             db = cls(cls.VERSION, {}, {})
             # check if we can update from a previous version database
             db.check_and_update_db()
index f02fb993845581d833a5ee4ebc459dec87ed8aa9..afddfd99aa58851487d70072cb7836bbab3fb09a 100644 (file)
@@ -3,6 +3,7 @@ from __future__ import absolute_import
 
 from base64 import b64encode
 import json
+import logging
 import os
 import threading
 import time
@@ -12,7 +13,7 @@ import cherrypy
 import jwt
 
 from .access_control import LocalAuthenticator, UserDoesNotExist
-from .. import mgr, logger
+from .. import mgr
 
 
 class JwtManager(object):
@@ -30,6 +31,7 @@ class JwtManager(object):
 
     @classmethod
     def init(cls):
+        cls.logger = logging.getLogger('jwt')
         # generate a new secret if it does not exist
         secret = mgr.get_store('jwt_secret')
         if secret is None:
@@ -88,16 +90,16 @@ class JwtManager(object):
                 user = AuthManager.get_user(dtoken['username'])
                 if user.last_update <= dtoken['iat']:
                     return user
-                logger.debug("AMT: user info changed after token was issued, iat=%s last_update=%s",
-                             dtoken['iat'], user.last_update)
+                cls.logger.debug("user info changed after token was issued, iat=%s last_update=%s",
+                                 dtoken['iat'], user.last_update)
             else:
-                logger.debug('AMT: Token is black-listed')
+                cls.logger.debug('Token is black-listed')
         except jwt.exceptions.ExpiredSignatureError:
-            logger.debug("AMT: Token has expired")
+            cls.logger.debug("Token has expired")
         except jwt.exceptions.InvalidTokenError:
-            logger.debug("AMT: Failed to decode token")
+            cls.logger.debug("Failed to decode token")
         except UserDoesNotExist:
-            logger.debug("AMT: Invalid token: user %s does not exist", dtoken['username'])
+            cls.logger.debug("Invalid token: user %s does not exist", dtoken['username'])
         return None
 
     @classmethod
@@ -153,23 +155,24 @@ class AuthManagerTool(cherrypy.Tool):
     def __init__(self):
         super(AuthManagerTool, self).__init__(
             'before_handler', self._check_authentication, priority=20)
+        self.logger = logging.getLogger('auth')
 
     def _check_authentication(self):
         JwtManager.reset_user()
         token = JwtManager.get_token_from_header()
-        logger.debug("AMT: token: %s", token)
+        self.logger.debug("token: %s", token)
         if token:
             user = JwtManager.get_user(token)
             if user:
                 self._check_authorization(user.username)
                 return
-        logger.debug('AMT: Unauthorized access to %s',
-                     cherrypy.url(relative='server'))
+        self.logger.debug('Unauthorized access to %s',
+                          cherrypy.url(relative='server'))
         raise cherrypy.HTTPError(401, 'You are not authorized to access '
                                       'that resource')
 
     def _check_authorization(self, username):
-        logger.debug("AMT: checking authorization...")
+        self.logger.debug("checking authorization...")
         username = username
         handler = cherrypy.request.handler.callable
         controller = handler.__self__
@@ -181,12 +184,12 @@ class AuthManagerTool(cherrypy.Tool):
             # controller does not define any authorization restrictions
             return
 
-        logger.debug("AMT: checking '%s' access to '%s' scope", sec_perms,
-                     sec_scope)
+        self.logger.debug("checking '%s' access to '%s' scope", sec_perms,
+                          sec_scope)
 
         if not sec_perms:
-            logger.debug("Fail to check permission on: %s:%s", controller,
-                         handler)
+            self.logger.debug("Fail to check permission on: %s:%s", controller,
+                              handler)
             raise cherrypy.HTTPError(403, "You don't have permissions to "
                                           "access that resource")
 
index e36628224ac24308caa284ef422df4f0f49b4d03..e74b7f7070275c82db48b2df49caf4d9baffc76e 100644 (file)
@@ -2,6 +2,7 @@
 from __future__ import absolute_import
 
 import json
+import logging
 
 import rados
 
@@ -16,7 +17,7 @@ except ImportError:
         next(b, None)
         return zip(a, b)
 
-from .. import logger, mgr
+from .. import mgr
 
 try:
     from typing import Dict, Any  # pylint: disable=unused-import
@@ -24,6 +25,9 @@ except ImportError:
     pass  # For typing only
 
 
+logger = logging.getLogger('ceph_service')
+
+
 class SendCommandError(rados.Error):
     def __init__(self, err, prefix, argdict, errno):
         self.prefix = prefix
@@ -163,9 +167,8 @@ class CephService(object):
         mgr.send_command(result, srv_type, srv_spec, json.dumps(argdict), "")
         r, outb, outs = result.wait()
         if r != 0:
-            msg = "send_command '{}' failed. (r={}, outs=\"{}\", kwargs={})".format(prefix, r, outs,
-                                                                                    kwargs)
-            logger.error(msg)
+            logger.error("send_command '%s' failed. (r=%s, outs=\"%s\", kwargs=%s)", prefix, r,
+                         outs, kwargs)
             raise SendCommandError(outs, prefix, argdict, r)
 
         try:
index 3dfcebc7fef8d8ed2af5077b18801bcf001dc34f..926ab32cac454dc20a52dd8e33f2aa64977c0701 100644 (file)
@@ -2,13 +2,17 @@
 from __future__ import absolute_import
 
 from contextlib import contextmanager
+import logging
 
 import datetime
 import os
 import six
 import cephfs
 
-from .. import mgr, logger
+from .. import mgr
+
+
+logger = logging.getLogger('cephfs')
 
 
 class CephFS(object):
@@ -35,17 +39,17 @@ class CephFS(object):
         return fs_info[0]['mdsmap']['fs_name']
 
     def __init__(self, fs_name=None):
-        logger.debug("[CephFS] initializing cephfs connection")
+        logger.debug("initializing cephfs connection")
         self.cfs = cephfs.LibCephFS(rados_inst=mgr.rados)
-        logger.debug("[CephFS] mounting cephfs filesystem: %s", fs_name)
+        logger.debug("mounting cephfs filesystem: %s", fs_name)
         if fs_name:
             self.cfs.mount(filesystem_name=fs_name)
         else:
             self.cfs.mount()
-        logger.debug("[CephFS] mounted cephfs filesystem")
+        logger.debug("mounted cephfs filesystem")
 
     def __del__(self):
-        logger.debug("[CephFS] shutting down cephfs filesystem")
+        logger.debug("shutting down cephfs filesystem")
         self.cfs.shutdown()
 
     @contextmanager
@@ -75,21 +79,21 @@ class CephFS(object):
         """
         if isinstance(path, six.string_types):
             path = path.encode()
-        logger.debug("[CephFS] get_dir_list dir_path=%s level=%s",
-                     path, level)
+        logger.debug("get_dir_list dirpath=%s level=%s", path,
+                     level)
         if level == 0:
             return [path]
-        logger.debug("[CephFS] opening dir_path=%s", path)
+        logger.debug("opening dirpath=%s", path)
         with self.opendir(path) as d:
             dent = self.cfs.readdir(d)
             paths = [path]
             while dent:
-                logger.debug("[CephFS] found entry=%s", dent.d_name)
+                logger.debug("found entry=%s", dent.d_name)
                 if dent.d_name in [b'.', b'..']:
                     dent = self.cfs.readdir(d)
                     continue
                 if dent.is_dir():
-                    logger.debug("[CephFS] found dir=%s", dent.d_name)
+                    logger.debug("found dir=%s", dent.d_name)
                     subdir_path = os.path.join(path, dent.d_name)
                     paths.extend(self.ls_dir(subdir_path, level - 1))
                 dent = self.cfs.readdir(d)
@@ -111,7 +115,7 @@ class CephFS(object):
             raise Exception('Cannot create root directory "/"')
         if self.dir_exists(path):
             return
-        logger.info("[CephFS] Creating directory: %s", path)
+        logger.info("Creating directory: %s", path)
         self.cfs.mkdirs(path, 0o755)
 
     def rm_dir(self, path):
@@ -123,7 +127,7 @@ class CephFS(object):
             raise Exception('Cannot remove root directory "/"')
         if not self.dir_exists(path):
             return
-        logger.info("[CephFS] Removing directory: %s", path)
+        logger.info("Removing directory: %s", path)
         self.cfs.rmdir(path)
 
     def mk_snapshot(self, path, name=None, mode=0o755):
@@ -147,7 +151,7 @@ class CephFS(object):
             name = now.replace(tzinfo=tz).isoformat('T')
         client_snapdir = self.cfs.conf_get('client_snapdir')
         snapshot_path = os.path.join(path, client_snapdir, name)
-        logger.info("[CephFS] Creating snapshot: %s", snapshot_path)
+        logger.info("Creating snapshot: %s", snapshot_path)
         self.cfs.mkdir(snapshot_path, mode)
         return name
 
@@ -188,7 +192,7 @@ class CephFS(object):
         """
         client_snapdir = self.cfs.conf_get('client_snapdir')
         snapshot_path = os.path.join(path, client_snapdir, name)
-        logger.info("[CephFS] Removing snapshot: %s", snapshot_path)
+        logger.info("Removing snapshot: %s", snapshot_path)
         self.cfs.rmdir(snapshot_path)
 
     def get_quotas(self, path):
index 66ba96cfb7ac8313dfab65b773de28915b84afb4..4a1f2a105d1355289daa119394aec1ea455e1321 100644 (file)
@@ -3,6 +3,7 @@ from __future__ import absolute_import
 
 import json
 from contextlib import contextmanager
+import logging
 import six
 
 import cherrypy
@@ -10,16 +11,20 @@ import cherrypy
 import rbd
 import rados
 
-from .. import logger
 from ..services.ceph_service import SendCommandError
 from ..exceptions import ViewCacheNoDataException, DashboardException
 from ..tools import wraps
 
+
+logger = logging.getLogger('exception')
+
+
 if six.PY2:
     # Monkey-patch a __call__ method into @contextmanager to make
     # it compatible to Python 3
 
-    from contextlib import GeneratorContextManager  # pylint: disable=no-name-in-module
+    # pylint: disable=no-name-in-module,ungrouped-imports
+    from contextlib import GeneratorContextManager
 
     def init(self, *args):
         if len(args) == 1:
@@ -88,7 +93,7 @@ def dashboard_exception_handler(handler, *args, **kwargs):
             return handler(*args, **kwargs)
     # Don't catch cherrypy.* Exceptions.
     except (ViewCacheNoDataException, DashboardException) as e:
-        logger.exception('dashboard_exception_handler')
+        logger.exception('Dashboard Exception')
         cherrypy.response.headers['Content-Type'] = 'application/json'
         cherrypy.response.status = getattr(e, 'status', 400)
         return json.dumps(serialize_dashboard_exception(e)).encode('utf-8')
index c052ade9b51b0d1afc3052087c3e31ab3f86d4f3..658c0915b67d751d3ab7144581c9273c4f95b98f 100644 (file)
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
+import logging
 import re
 
 from orchestrator import OrchestratorError
@@ -8,11 +9,14 @@ from .cephfs import CephFS
 from .cephx import CephX
 from .orchestrator import OrchClient
 from .rgw_client import RgwClient, RequestException, NoCredentialsException
-from .. import mgr, logger
+from .. import mgr
 from ..settings import Settings
 from ..exceptions import DashboardException
 
 
+logger = logging.getLogger('ganesha')
+
+
 class NFSException(DashboardException):
     def __init__(self, msg):
         super(NFSException, self).__init__(component="nfs", msg=msg)
@@ -128,9 +132,9 @@ class Ganesha(object):
 
     @classmethod
     def reload_daemons(cls, cluster_id, daemons_id):
-        logger.debug("[NFS] issued reload of daemons: %s", daemons_id)
+        logger.debug("issued reload of daemons: %s", daemons_id)
         if not OrchClient.instance().available():
-            logger.debug("[NFS] orchestrator not available")
+            logger.debug("orchestrator not available")
             return
         reload_list = []
         daemons = cls.get_daemons_status()
@@ -636,7 +640,7 @@ class Export(object):
 
     @classmethod
     def from_export_block(cls, export_block, cluster_id, defaults):
-        logger.debug("[NFS] parsing export block: %s", export_block)
+        logger.debug("parsing export block: %s", export_block)
 
         fsal_block = [b for b in export_block['_blocks_']
                       if b['block_name'] == "FSAL"]
@@ -795,7 +799,7 @@ class GaneshaConf(object):
                     size, _ = obj.stat()
                     raw_config = obj.read(size)
                     raw_config = raw_config.decode("utf-8")
-                    logger.debug("[NFS] read export configuration from rados "
+                    logger.debug("read export configuration from rados "
                                  "object %s/%s/%s:\n%s", self.rados_pool,
                                  self.rados_namespace, obj.key, raw_config)
                     self.export_conf_blocks.extend(
@@ -804,7 +808,7 @@ class GaneshaConf(object):
                     size, _ = obj.stat()
                     raw_config = obj.read(size)
                     raw_config = raw_config.decode("utf-8")
-                    logger.debug("[NFS] read daemon configuration from rados "
+                    logger.debug("read daemon configuration from rados "
                                  "object %s/%s/%s:\n%s", self.rados_pool,
                                  self.rados_namespace, obj.key, raw_config)
                     idx = obj.key.find('-')
@@ -818,7 +822,7 @@ class GaneshaConf(object):
                 ioctx.set_namespace(self.rados_namespace)
             ioctx.write_full(obj, raw_config.encode('utf-8'))
             logger.debug(
-                "[NFS] write configuration into rados object %s/%s/%s:\n%s",
+                "write configuration into rados object %s/%s/%s:\n%s",
                 self.rados_pool, self.rados_namespace, obj, raw_config)
 
     @classmethod
@@ -847,7 +851,7 @@ class GaneshaConf(object):
         if squash.lower() in ["all", "all_squash", "allsquash",
                               "all_anonymous", "allanonymous"]:
             return "all_squash"
-        logger.error("[NFS] could not parse squash value: %s", squash)
+        logger.error("could not parse squash value: %s", squash)
         raise NFSException("'{}' is an invalid squash option".format(squash))
 
     @classmethod
@@ -856,7 +860,7 @@ class GaneshaConf(object):
             return 3
         if str(protocol) in ["NFSV4", "4", "V4", "NFS4"]:
             return 4
-        logger.error("[NFS] could not parse protocol value: %s", protocol)
+        logger.error("could not parse protocol value: %s", protocol)
         raise NFSException("'{}' is an invalid NFS protocol version identifier"
                            .format(protocol))
 
index d2a64c462b3dabfb7dd29eb67879d55fe5a4295f..f41639e7a09f36a2c19023ddf50d4c6431b4ae1d 100644 (file)
@@ -3,6 +3,7 @@
 from __future__ import absolute_import
 
 import json
+import logging
 
 from requests.auth import HTTPBasicAuth
 
@@ -12,11 +13,13 @@ except ImportError:
     from urllib.parse import urlparse
 
 from .iscsi_config import IscsiGatewaysConfig  # pylint: disable=cyclic-import
-from .. import logger
 from ..settings import Settings
 from ..rest_client import RestClient
 
 
+logger = logging.getLogger('iscsi_client')
+
+
 class IscsiClient(RestClient):
     _CLIENT_NAME = 'iscsi'
     _instances = {}
@@ -78,19 +81,19 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/target/{target_iqn}')
     def create_target(self, target_iqn, target_controls, request=None):
-        logger.debug("iSCSI[%s] Creating target: %s", self.gateway_name, target_iqn)
+        logger.debug("[%s] Creating target: %s", self.gateway_name, target_iqn)
         return request({
             'controls': json.dumps(target_controls)
         })
 
     @RestClient.api_delete('/api/target/{target_iqn}')
     def delete_target(self, target_iqn, request=None):
-        logger.debug("iSCSI[%s] Deleting target: %s", self.gateway_name, target_iqn)
+        logger.debug("[%s] Deleting target: %s", self.gateway_name, target_iqn)
         return request()
 
     @RestClient.api_put('/api/target/{target_iqn}')
     def reconfigure_target(self, target_iqn, target_controls, request=None):
-        logger.debug("iSCSI[%s] Reconfiguring target: %s", self.gateway_name, target_iqn)
+        logger.debug("[%s] Reconfiguring target: %s", self.gateway_name, target_iqn)
         return request({
             'mode': 'reconfigure',
             'controls': json.dumps(target_controls)
@@ -98,7 +101,7 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/gateway/{target_iqn}/{gateway_name}')
     def create_gateway(self, target_iqn, gateway_name, ip_address, request=None):
-        logger.debug("iSCSI[%s] Creating gateway: %s/%s", self.gateway_name, target_iqn,
+        logger.debug("[%s] Creating gateway: %s/%s", self.gateway_name, target_iqn,
                      gateway_name)
         return request({
             'ip_address': ','.join(ip_address),
@@ -111,12 +114,12 @@ class IscsiClient(RestClient):
 
     @RestClient.api_delete('/api/gateway/{target_iqn}/{gateway_name}')
     def delete_gateway(self, target_iqn, gateway_name, request=None):
-        logger.debug("iSCSI: Deleting gateway: %s/%s", target_iqn, gateway_name)
+        logger.debug("Deleting gateway: %s/%s", target_iqn, gateway_name)
         return request()
 
     @RestClient.api_put('/api/disk/{pool}/{image}')
     def create_disk(self, pool, image, backstore, wwn, request=None):
-        logger.debug("iSCSI[%s] Creating disk: %s/%s", self.gateway_name, pool, image)
+        logger.debug("[%s] Creating disk: %s/%s", self.gateway_name, pool, image)
         return request({
             'mode': 'create',
             'backstore': backstore,
@@ -125,14 +128,14 @@ class IscsiClient(RestClient):
 
     @RestClient.api_delete('/api/disk/{pool}/{image}')
     def delete_disk(self, pool, image, request=None):
-        logger.debug("iSCSI[%s] Deleting disk: %s/%s", self.gateway_name, pool, image)
+        logger.debug("[%s] Deleting disk: %s/%s", self.gateway_name, pool, image)
         return request({
             'preserve_image': 'true'
         })
 
     @RestClient.api_put('/api/disk/{pool}/{image}')
     def reconfigure_disk(self, pool, image, controls, request=None):
-        logger.debug("iSCSI[%s] Reconfiguring disk: %s/%s", self.gateway_name, pool, image)
+        logger.debug("[%s] Reconfiguring disk: %s/%s", self.gateway_name, pool, image)
         return request({
             'controls': json.dumps(controls),
             'mode': 'reconfigure'
@@ -140,7 +143,7 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/targetlun/{target_iqn}')
     def create_target_lun(self, target_iqn, image_id, lun, request=None):
-        logger.debug("iSCSI[%s] Creating target lun: %s/%s", self.gateway_name, target_iqn,
+        logger.debug("[%s] Creating target lun: %s/%s", self.gateway_name, target_iqn,
                      image_id)
         return request({
             'disk': image_id,
@@ -149,7 +152,7 @@ class IscsiClient(RestClient):
 
     @RestClient.api_delete('/api/targetlun/{target_iqn}')
     def delete_target_lun(self, target_iqn, image_id, request=None):
-        logger.debug("iSCSI[%s] Deleting target lun: %s/%s", self.gateway_name, target_iqn,
+        logger.debug("[%s] Deleting target lun: %s/%s", self.gateway_name, target_iqn,
                      image_id)
         return request({
             'disk': image_id
@@ -157,17 +160,17 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/client/{target_iqn}/{client_iqn}')
     def create_client(self, target_iqn, client_iqn, request=None):
-        logger.debug("iSCSI[%s] Creating client: %s/%s", self.gateway_name, target_iqn, client_iqn)
+        logger.debug("[%s] Creating client: %s/%s", self.gateway_name, target_iqn, client_iqn)
         return request()
 
     @RestClient.api_delete('/api/client/{target_iqn}/{client_iqn}')
     def delete_client(self, target_iqn, client_iqn, request=None):
-        logger.debug("iSCSI[%s] Deleting client: %s/%s", self.gateway_name, target_iqn, client_iqn)
+        logger.debug("[%s] Deleting client: %s/%s", self.gateway_name, target_iqn, client_iqn)
         return request()
 
     @RestClient.api_put('/api/clientlun/{target_iqn}/{client_iqn}')
     def create_client_lun(self, target_iqn, client_iqn, image_id, request=None):
-        logger.debug("iSCSI[%s] Creating client lun: %s/%s", self.gateway_name, target_iqn,
+        logger.debug("[%s] Creating client lun: %s/%s", self.gateway_name, target_iqn,
                      client_iqn)
         return request({
             'disk': image_id
@@ -184,7 +187,7 @@ class IscsiClient(RestClient):
     @RestClient.api_put('/api/clientauth/{target_iqn}/{client_iqn}')
     def create_client_auth(self, target_iqn, client_iqn, username, password, mutual_username,
                            mutual_password, request=None):
-        logger.debug("iSCSI[%s] Creating client auth: %s/%s/%s/%s/%s/%s",
+        logger.debug("[%s] Creating client auth: %s/%s/%s/%s/%s/%s",
                      self.gateway_name, target_iqn, client_iqn, username, password, mutual_username,
                      mutual_password)
         return request({
@@ -196,7 +199,7 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/hostgroup/{target_iqn}/{group_name}')
     def create_group(self, target_iqn, group_name, members, image_ids, request=None):
-        logger.debug("iSCSI[%s] Creating group: %s/%s", self.gateway_name, target_iqn, group_name)
+        logger.debug("[%s] Creating group: %s/%s", self.gateway_name, target_iqn, group_name)
         return request({
             'members': ','.join(members),
             'disks': ','.join(image_ids)
@@ -204,12 +207,12 @@ class IscsiClient(RestClient):
 
     @RestClient.api_delete('/api/hostgroup/{target_iqn}/{group_name}')
     def delete_group(self, target_iqn, group_name, request=None):
-        logger.debug("iSCSI[%s] Deleting group: %s/%s", self.gateway_name, target_iqn, group_name)
+        logger.debug("[%s] Deleting group: %s/%s", self.gateway_name, target_iqn, group_name)
         return request()
 
     @RestClient.api_put('/api/discoveryauth')
     def update_discoveryauth(self, user, password, mutual_user, mutual_password, request=None):
-        logger.debug("iSCSI[%s] Updating discoveryauth: %s/%s/%s/%s", self.gateway_name, user,
+        logger.debug("[%s] Updating discoveryauth: %s/%s/%s/%s", self.gateway_name, user,
                      password, mutual_user, mutual_password)
         return request({
             'username': user,
@@ -220,7 +223,7 @@ class IscsiClient(RestClient):
 
     @RestClient.api_put('/api/targetauth/{target_iqn}')
     def update_targetacl(self, target_iqn, action, request=None):
-        logger.debug("iSCSI[%s] Updating targetacl: %s/%s", self.gateway_name, target_iqn, action)
+        logger.debug("[%s] Updating targetacl: %s/%s", self.gateway_name, target_iqn, action)
         return request({
             'action': action
         })
@@ -228,7 +231,7 @@ class IscsiClient(RestClient):
     @RestClient.api_put('/api/targetauth/{target_iqn}')
     def update_targetauth(self, target_iqn, user, password, mutual_user, mutual_password,
                           request=None):
-        logger.debug("iSCSI[%s] Updating targetauth: %s/%s/%s/%s/%s", self.gateway_name,
+        logger.debug("[%s] Updating targetauth: %s/%s/%s/%s/%s", self.gateway_name,
                      target_iqn, user, password, mutual_user, mutual_password)
         return request({
             'username': user,
index 22746e8abd612efa13156aba038d473f511636fe..27a868cfdf0276b0183f31b98869937b51f5cdf7 100644 (file)
@@ -1,12 +1,17 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
+import logging
+
 from orchestrator import InventoryFilter
 from orchestrator import OrchestratorClientMixin, raise_if_exception, OrchestratorError
-from .. import mgr, logger
+from .. import mgr
 from ..tools import wraps
 
 
+logger = logging.getLogger('orchestrator')
+
+
 # pylint: disable=abstract-method
 class OrchestratorAPI(OrchestratorClientMixin):
     def __init__(self):
@@ -16,7 +21,7 @@ class OrchestratorAPI(OrchestratorClientMixin):
     def status(self):
         try:
             status, desc = super(OrchestratorAPI, self).available()
-            logger.info("[ORCH] is orchestrator available: %s, %s", status, desc)
+            logger.info("is orchestrator available: %s, %s", status, desc)
             return dict(available=status, description=desc)
         except (RuntimeError, OrchestratorError, ImportError):
             return dict(available=False,
index 65094d5a95b4f267f121d90b44d8b5cb5c209cd2..d8cf7521fae3bc755e92ddc0bffaa95a25b22e95 100644 (file)
@@ -10,9 +10,13 @@ using the same structure of dashboard tasks
 from __future__ import absolute_import
 
 from datetime import datetime
+import logging
 
-from .. import mgr, logger
 from . import rbd
+from .. import mgr
+
+
+logger = logging.getLogger('progress')
 
 
 def _progress_event_to_dashboard_task_common(event, task):
@@ -79,11 +83,11 @@ def get_progress_tasks():
     progress_events = mgr.remote('progress', "_json")
 
     for ev in progress_events['events']:
-        logger.debug("[Progress] event=%s", ev)
+        logger.debug("event=%s", ev)
         executing_t.append(_progress_event_to_dashboard_task(ev))
 
     for ev in progress_events['completed']:
-        logger.debug("[Progress] finished event=%s", ev)
+        logger.debug("finished event=%s", ev)
         finished_t.append(_progress_event_to_dashboard_task(ev, True))
 
     return executing_t, finished_t
index fc841a1787c03b2e2bf1dd18ce4756608f896b43..a3c7b6edc63ba520da09b8e8083d0490fa34b724 100644 (file)
@@ -2,6 +2,7 @@
 from __future__ import absolute_import
 
 import re
+import logging
 import ipaddress
 from distutils.util import strtobool
 import xml.etree.ElementTree as ET  # noqa: N814
@@ -10,7 +11,7 @@ from ..awsauth import S3Auth
 from ..settings import Settings, Options
 from ..rest_client import RestClient, RequestException
 from ..tools import build_url, dict_contains_path, json_str_to_object, partial_dict
-from .. import mgr, logger
+from .. import mgr
 
 try:
     from typing import Any, Dict, List  # pylint: disable=unused-import
@@ -18,6 +19,9 @@ except ImportError:
     pass  # For typing only
 
 
+logger = logging.getLogger('rgw_client')
+
+
 class NoCredentialsException(RequestException):
     def __init__(self):
         super(NoCredentialsException, self).__init__(
index 00fd61c141d65697ed29286a2278b8171741098f..6a6ae38b2c7dccd9f3395ac8bab75185f2ec38ad 100644 (file)
@@ -5,18 +5,22 @@ from __future__ import absolute_import
 import os
 import errno
 import json
+import logging
 import threading
 import warnings
 
 import six
 from six.moves.urllib import parse
 
-from .. import mgr, logger
+from .. import mgr
 from ..tools import prepare_url_prefix
 
 if six.PY2:
     FileNotFoundError = IOError  # pylint: disable=redefined-builtin
 
+
+logger = logging.getLogger('sso')
+
 try:
     from onelogin.saml2.settings import OneLogin_Saml2_Settings as Saml2Settings
     from onelogin.saml2.errors import OneLogin_Saml2_Error as Saml2Error
@@ -71,17 +75,17 @@ class SsoDB(object):
         return "{}{}".format(cls.SSODB_CONFIG_KEY, version)
 
     def check_and_update_db(self):
-        logger.debug("SSO: Checking for previews DB versions")
+        logger.debug("Checking for previous DB versions")
         if self.VERSION != 1:
             raise NotImplementedError()
 
     @classmethod
     def load(cls):
-        logger.info("SSO: Loading SSO DB version=%s", cls.VERSION)
+        logger.info("Loading SSO DB version=%s", cls.VERSION)
 
         json_db = mgr.get_store(cls.ssodb_config_key(), None)
         if json_db is None:
-            logger.debug("SSO: No DB v%s found, creating new...", cls.VERSION)
+            logger.debug("No DB v%s found, creating new...", cls.VERSION)
             db = cls(cls.VERSION, '', Saml2({}))
             # check if we can update from a previous version database
             db.check_and_update_db()
index ef456cf0c66658483869389b50b4ba8d08a05bdb..17bbc600bd831916c0d0c0f1ffd84360107b36a6 100644 (file)
@@ -3,6 +3,7 @@
 from __future__ import absolute_import
 
 import json
+import logging
 import threading
 import time
 
@@ -12,7 +13,7 @@ from cherrypy.test import helper
 
 from mgr_module import CLICommand
 
-from .. import logger, mgr
+from .. import mgr
 from ..controllers import json_error_page, generate_controller_routes
 from ..services.auth import AuthManagerTool
 from ..services.exception import dashboard_exception_handler
@@ -25,6 +26,9 @@ PLUGIN_MANAGER.hook.init()
 PLUGIN_MANAGER.hook.register_commands()
 
 
+logger = logging.getLogger('tests')
+
+
 class CmdException(Exception):
     def __init__(self, retcode, message):
         super(CmdException, self).__init__(message)
index 312123db91788d2430aa98a179907345d7ed4d0f..14bac9e0aea1388e7272364bec8544874a5af8b4 100644 (file)
@@ -6,6 +6,7 @@ import inspect
 import json
 import functools
 import ipaddress
+import logging
 
 import collections
 from datetime import datetime, timedelta
@@ -22,7 +23,7 @@ try:
 except ImportError:
     from urllib.parse import urljoin
 
-from . import logger, mgr
+from . import mgr
 from .exceptions import ViewCacheNoDataException
 from .settings import Settings
 from .services.auth import JwtManager
@@ -37,6 +38,7 @@ class RequestLoggingTool(cherrypy.Tool):
     def __init__(self):
         cherrypy.Tool.__init__(self, 'before_handler', self.request_begin,
                                priority=10)
+        self.logger = logging.getLogger('request')
 
     def _setup(self):
         cherrypy.Tool._setup(self)
@@ -49,8 +51,8 @@ class RequestLoggingTool(cherrypy.Tool):
         req = cherrypy.request
         user = JwtManager.get_username()
         # Log the request.
-        logger.debug('[%s:%s] [%s] [%s] %s', req.remote.ip, req.remote.port,
-                     req.method, user, req.path_info)
+        self.logger.debug('[%s:%s] [%s] [%s] %s', req.remote.ip, req.remote.port,
+                          req.method, user, req.path_info)
         # Audit the request.
         if Settings.AUDIT_API_ENABLED and req.method not in ['GET']:
             url = build_url(req.remote.ip, scheme=req.scheme,
@@ -74,16 +76,16 @@ class RequestLoggingTool(cherrypy.Tool):
             mgr.cluster_log('audit', mgr.CLUSTER_LOG_PRIO_INFO, msg)
 
     def request_error(self):
-        self._request_log(logger.error)
-        logger.error(cherrypy.response.body)
+        self._request_log(self.logger.error)
+        self.logger.error(cherrypy.response.body)
 
     def request_end(self):
         status = cherrypy.response.status[:3]
         if status in ["401"]:
             # log unauthorized accesses
-            self._request_log(logger.warning)
+            self._request_log(self.logger.warning)
         else:
-            self._request_log(logger.info)
+            self._request_log(self.logger.info)
 
     def _format_bytes(self, num):
         units = ['B', 'K', 'M', 'G']
@@ -149,13 +151,13 @@ class ViewCache(object):
             t1 = 0.0
             try:
                 t0 = time.time()
-                logger.debug("VC: starting execution of %s", self.fn)
+                self._view.logger.debug("starting execution of %s", self.fn)
                 val = self.fn(*self.args, **self.kwargs)
                 t1 = time.time()
             except Exception as ex:
                 with self._view.lock:
-                    logger.exception("Error while calling fn=%s ex=%s", self.fn,
-                                     str(ex))
+                    self._view.logger.exception("Error while calling fn=%s ex=%s", self.fn,
+                                                str(ex))
                     self._view.value = None
                     self._view.value_when = None
                     self._view.getter_thread = None
@@ -168,8 +170,8 @@ class ViewCache(object):
                     self._view.getter_thread = None
                     self._view.exception = None
 
-            logger.debug("VC: execution of %s finished in: %s", self.fn,
-                         t1 - t0)
+            self._view.logger.debug("execution of %s finished in: %s", self.fn,
+                                    t1 - t0)
             self.event.set()
 
     class RemoteViewCache(object):
@@ -186,6 +188,7 @@ class ViewCache(object):
             self.latency = 0
             self.exception = None
             self.lock = threading.Lock()
+            self.logger = logging.getLogger('viewcache')
 
         def reset(self):
             with self.lock:
@@ -217,7 +220,7 @@ class ViewCache(object):
                                                                 kwargs)
                     self.getter_thread.start()
                 else:
-                    logger.debug("VC: getter_thread still alive for: %s", fn)
+                    self.logger.debug("getter_thread still alive for: %s", fn)
 
                 ev = self.getter_thread.event
 
@@ -276,7 +279,8 @@ class NotificationQueue(threading.Thread):
                 return
             cls._running = True
             cls._instance = NotificationQueue()
-        logger.debug("starting notification queue")
+        cls.logger = logging.getLogger('notification_queue')
+        cls.logger.debug("starting notification queue")
         cls._instance.start()
 
     @classmethod
@@ -290,9 +294,9 @@ class NotificationQueue(threading.Thread):
             cls._running = False
         with cls._cond:
             cls._cond.notify()
-        logger.debug("waiting for notification queue to finish")
+        cls.logger.debug("waiting for notification queue to finish")
         instance.join()
-        logger.debug("notification queue stopped")
+        cls.logger.debug("notification queue stopped")
 
     @classmethod
     def _registered_handler(cls, func, n_types):
@@ -323,8 +327,8 @@ class NotificationQueue(threading.Thread):
             for ev_type in n_types:
                 if not cls._registered_handler(func, ev_type):
                     cls._listeners[ev_type].add((priority, func))
-                    logger.debug("NQ: function %s was registered for events of"
-                                 " type %s", func, ev_type)
+                    cls.logger.debug("function %s was registered for events of"
+                                     " type %s", func, ev_type)
 
     @classmethod
     def deregister(cls, func, n_types=None):
@@ -354,8 +358,8 @@ class NotificationQueue(threading.Thread):
                         break
                 if to_remove:
                     listeners.discard(to_remove)
-                    logger.debug("NQ: function %s was deregistered for events "
-                                 "of type %s", func, ev_type)
+                    cls.logger.debug("function %s was deregistered for events "
+                                     "of type %s", func, ev_type)
 
     @classmethod
     def new_notification(cls, notify_type, notify_value):
@@ -375,10 +379,10 @@ class NotificationQueue(threading.Thread):
                 listener[1](notify_value)
 
     def run(self):
-        logger.debug("notification queue started")
+        self.logger.debug("notification queue started")
         while self._running:
             private_buffer = []
-            logger.debug("NQ: processing queue: %s", len(self._queue))
+            self.logger.debug("processing queue: %s", len(self._queue))
             try:
                 while True:
                     private_buffer.append(self._queue.popleft())
@@ -389,10 +393,10 @@ class NotificationQueue(threading.Thread):
                 while self._running and not self._queue:
                     self._cond.wait()
         # flush remaining events
-        logger.debug("NQ: flush remaining events: %s", len(self._queue))
+        self.logger.debug("flush remaining events: %s", len(self._queue))
         self._notify_listeners(self._queue)
         self._queue.clear()
-        logger.debug("notification queue finished")
+        self.logger.debug("notification queue finished")
 
 
 # pylint: disable=too-many-arguments, protected-access
@@ -411,11 +415,12 @@ class TaskManager(object):
 
     @classmethod
     def init(cls):
+        cls.logger = logging.getLogger('taskmgr')
         NotificationQueue.register(cls._handle_finished_task, 'cd_task_finished')
 
     @classmethod
     def _handle_finished_task(cls, task):
-        logger.info("TM: finished %s", task)
+        cls.logger.info("finished %s", task)
         with cls._lock:
             cls._executing_tasks.remove(task)
             cls._finished_tasks.append(task)
@@ -433,13 +438,13 @@ class TaskManager(object):
                     exception_handler)
         with cls._lock:
             if task in cls._executing_tasks:
-                logger.debug("TM: task already executing: %s", task)
+                cls.logger.debug("task already executing: %s", task)
                 for t in cls._executing_tasks:
                     if t == task:
                         return t
-            logger.debug("TM: created %s", task)
+            cls.logger.debug("created %s", task)
             cls._executing_tasks.add(task)
-        logger.info("TM: running %s", task)
+        cls.logger.info("running %s", task)
         task._run()
         return task
 
@@ -507,6 +512,7 @@ class TaskManager(object):
 # pylint: disable=protected-access
 class TaskExecutor(object):
     def __init__(self):
+        self.logger = logging.getLogger('taskexec')
         self.task = None
 
     def init(self, task):
@@ -514,18 +520,18 @@ class TaskExecutor(object):
 
     # pylint: disable=broad-except
     def start(self):
-        logger.debug("EX: executing task %s", self.task)
+        self.logger.debug("executing task %s", self.task)
         try:
             self.task.fn(*self.task.fn_args, **self.task.fn_kwargs)
         except Exception as ex:
-            logger.exception("Error while calling %s", self.task)
+            self.logger.exception("Error while calling %s", self.task)
             self.finish(None, ex)
 
     def finish(self, ret_value, exception):
         if not exception:
-            logger.debug("EX: successfully finished task: %s", self.task)
+            self.logger.debug("successfully finished task: %s", self.task)
         else:
-            logger.debug("EX: task finished with exception: %s", self.task)
+            self.logger.debug("task finished with exception: %s", self.task)
         self.task._complete(ret_value, exception)
 
 
@@ -542,10 +548,10 @@ class ThreadedExecutor(TaskExecutor):
     def _run(self):
         TaskManager._task_local_data.task = self.task
         try:
-            logger.debug("TEX: executing task %s", self.task)
+            self.logger.debug("executing task %s", self.task)
             val = self.task.fn(*self.task.fn_args, **self.task.fn_kwargs)
         except Exception as ex:
-            logger.exception("Error while calling %s", self.task)
+            self.logger.exception("Error while calling %s", self.task)
             self.finish(None, ex)
         else:
             self.finish(val, None)
@@ -569,6 +575,7 @@ class Task(object):
         self.end_time = None
         self.duration = 0
         self.exception = None
+        self.logger = logging.getLogger('task')
         self.lock = threading.Lock()
 
     def __hash__(self):
@@ -611,8 +618,8 @@ class Task(object):
             if not self.exception:
                 self.set_progress(100, True)
         NotificationQueue.new_notification('cd_task_finished', self)
-        logger.debug("TK: execution of %s finished in: %s s", self,
-                     self.duration)
+        self.logger.debug("execution of %s finished in: %s s", self,
+                          self.duration)
 
     def _handle_task_finished(self, task):
         if self == task: