if 'UNITTEST' not in os.environ:
- class _LoggerProxy(object):
- def __init__(self):
- self._logger = None
-
- def __getattr__(self, item):
- if self._logger is None:
- raise AttributeError("logger not initialized")
- return getattr(self._logger, item)
-
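# Defers attribute access to the real mgr module instance, which is injected via init() when ceph-mgr loads the module.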
class _ModuleProxy(object):
def __init__(self):
self._mgr = None
def init(self, module_inst):
- global logger
self._mgr = module_inst
- logger._logger = self._mgr._logger
def __getattr__(self, item):
if self._mgr is None:
    raise AttributeError("mgr module instance not initialized")
return getattr(self._mgr, item)
mgr = _ModuleProxy()
- logger = _LoggerProxy()
# DO NOT REMOVE: required for ceph-mgr to load a module
from .module import Module, StandbyModule # noqa: F401
else:
import logging
logging.basicConfig(level=logging.DEBUG)
- logger = logging.getLogger(__name__)
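# basicConfig() installs a StreamHandler on the root logger; make sure it forwards DEBUG records as well.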
logging.root.handlers[0].setLevel(logging.DEBUG)
os.environ['PATH'] = '{}:{}'.format(os.path.abspath('../../../../build/bin'),
os.environ['PATH'])
import importlib
import inspect
import json
+import logging
import os
import pkgutil
import re
# pylint: disable=wrong-import-position
import cherrypy
-from .. import logger
from ..security import Scope, Permission
from ..tools import wraps, getargspec, TaskManager, get_request_body_params
from ..exceptions import ScopeNotValid, PermissionNotValid
class Controller(object):
def __init__(self, path, base_url=None, security_scope=None, secure=True):
if security_scope and not Scope.valid_scope(security_scope):
- logger.debug("Invalid security scope name: %s\n Possible values: "
- "%s", security_scope, Scope.all_scopes())
raise ScopeNotValid(security_scope)
self.path = path
self.base_url = base_url
def load_controllers():
+ logger = logging.getLogger('controller.load')
# setting sys.path properly when not running under the mgr
controllers_dir = os.path.dirname(os.path.realpath(__file__))
dashboard_dir = os.path.dirname(controllers_dir)
mgr_dir = os.path.dirname(dashboard_dir)
- logger.debug("LC: controllers_dir=%s", controllers_dir)
- logger.debug("LC: dashboard_dir=%s", dashboard_dir)
- logger.debug("LC: mgr_dir=%s", mgr_dir)
+ logger.debug("controllers_dir=%s", controllers_dir)
+ logger.debug("dashboard_dir=%s", dashboard_dir)
+ logger.debug("mgr_dir=%s", mgr_dir)
if mgr_dir not in sys.path:
sys.path.append(mgr_dir)
controllers = []
mods = [mod for _, mod, _ in pkgutil.iter_modules([controllers_dir])]
- logger.debug("LC: mods=%s", mods)
+ logger.debug("mods=%s", mods)
for mod_name in mods:
mod = importlib.import_module('.controllers.{}'.format(mod_name),
package='dashboard')
url = "{}{}".format(base_url, endp_url)
+ logger = logging.getLogger('controller')
logger.debug("Mapped [%s] to %s:%s restricted to %s",
url, ctrl_class.__name__, endpoint.action,
endpoint.method)
parent_urls.add(generate_controller_routes(endpoint, mapper,
"{}".format(url_prefix)))
+ logger = logging.getLogger('controller')
logger.debug("list of parent paths: %s", parent_urls)
return mapper, parent_urls
self.action)
def __init__(self):
+ logger = logging.getLogger('controller')
logger.info('Initializing controller: %s -> %s',
self.__class__.__name__, self._cp_path_)
for perm in permissions:
if not Permission.valid_permission(perm):
+ logger = logging.getLogger('controller.set_func_perms')
logger.debug("Invalid security permission: %s\n "
"Possible values: %s", perm,
Permission.all_permissions())
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import logging
import cherrypy
from . import ApiController, RESTController
-from .. import logger, mgr
+from .. import mgr
from ..exceptions import DashboardException
from ..services.auth import AuthManager, JwtManager
+logger = logging.getLogger('controllers.auth')
+
+
@ApiController('/auth', secure=False)
class Auth(RESTController):
"""
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import logging
import cherrypy
from . import Controller, BaseController, Endpoint, ENDPOINT_MAP
-from .. import logger, mgr
+from .. import mgr
from ..tools import str_to_bool
+logger = logging.getLogger('controllers.docs')
+
+
@Controller('/docs', secure=False)
class Docs(BaseController):
host = cherrypy.request.base
host = host[host.index(':')+3:]
- logger.debug("DOCS: Host: %s", host)
+ logger.debug("Host: %s", host)
paths = self._gen_paths(all_endpoints, base_url)
--- /dev/null
+from __future__ import absolute_import
+
+import logging
+
+from . import UiApiController, BaseController, Endpoint
+
+
+logger = logging.getLogger('frontend.error')
+
+
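+# Receives JavaScript errors reported by the web UI and writes them to the mgr log.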
+@UiApiController('/logging', secure=False)
+class FrontendLogging(BaseController):
+
+ @Endpoint('POST', path='js-error')
+ def jsError(self, url, message, stack=None): # noqa: N802
+ logger.error('(%s): %s\n %s\n', url, message, stack)
import os
import re
import json
+import logging
try:
from functools import lru_cache
except ImportError:
from cherrypy.lib.static import serve_file
from . import Controller, UiApiController, BaseController, Proxy, Endpoint
-from .. import mgr, logger
+from .. import mgr
+
+
+logger = logging.getLogger("controllers.home")
LANGUAGES = {f for f in os.listdir(mgr.get_frontend_path())
+++ /dev/null
-from . import UiApiController, BaseController, Endpoint
-from .. import logger
-
-
-@UiApiController('/logging', secure=False)
-class Logging(BaseController):
-
- @Endpoint('POST', path='js-error')
- def jsError(self, url, message, stack=None): # noqa: N802
- logger.error('frontend error (%s): %s\n %s\n', url, message, stack)
from __future__ import absolute_import
from functools import partial
+import logging
import cherrypy
import cephfs
from . import ApiController, RESTController, UiApiController, BaseController, \
Endpoint, Task, ReadPermission, ControllerDoc, EndpointDoc
-from .. import logger
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.cephx import CephX
from ..services.rgw_client import RgwClient
+logger = logging.getLogger('controllers.ganesha')
+
+
# documentation helpers
EXPORT_SCHEMA = {
'export_id': (int, 'Export ID'),
root_dir = "/"
depth = int(depth)
if depth > 5:
- logger.warning("[NFS] Limiting depth to maximum value of 5: "
+ logger.warning("Limiting depth to maximum value of 5: "
"input depth=%s", depth)
depth = 5
root_dir = '{}{}'.format(root_dir.rstrip('/'), '/')
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
+import logging
from . import ApiController, RESTController, Endpoint, ReadPermission, UpdatePermission
-from .. import mgr, logger
+from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService, SendCommandError
from ..services.exception import handle_send_command_error
pass # For typing only
+logger = logging.getLogger('controllers.osd')
+
+
@ApiController('/osd', Scope.OSD)
class Osd(RESTController):
def list(self):
for _, dev_data in dev_smart_data.items():
if 'error' in dev_data:
logger.warning(
- '[OSD] Error retrieving smartctl data for device ID %s: %s', dev_id,
+ 'Error retrieving smartctl data for device ID %s: %s', dev_id,
dev_smart_data)
smart_data.update(dev_smart_data)
return smart_data
import json
import re
+import logging
from functools import partial
RESTController
from .rbd import _rbd_call
-from .. import logger, mgr
+from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from ..tools import ViewCache
serialize_dashboard_exception
+logger = logging.getLogger('controllers.rbd_mirror')
+
+
# pylint: disable=not-callable
def handle_rbd_mirror_error():
def composed_decorator(func):
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import logging
import json
import cherrypy
from . import ApiController, BaseController, RESTController, Endpoint, \
ReadPermission
-from .. import logger
from ..exceptions import DashboardException
from ..rest_client import RequestException
from ..security import Scope
from ..tools import json_str_to_object
+logger = logging.getLogger('controllers.rgw')
+
+
@ApiController('/rgw', Scope.RGW)
class Rgw(BaseController):
from __future__ import absolute_import
import json
+import logging
import os
import time
import requests
-from . import logger
from .exceptions import GrafanaError
from .settings import Settings
+logger = logging.getLogger('grafana')
+
+
class GrafanaRestClient(object):
@staticmethod
import collections
import errno
+import logging
import os
import socket
import tempfile
cherrypy.engine.subscribe('stop', __cov.stop)
# pylint: disable=wrong-import-position
-from . import logger, mgr
+from . import mgr
from .controllers import generate_routes, json_error_page
from .grafana import push_local_dashboards
from .tools import NotificationQueue, RequestLoggingTool, TaskManager, \
os._exit = os_exit_noop
+logger = logging.getLogger(__name__)
+
+
class CherryPyConfig(object):
"""
Class for common server configuration done by both active and
class CanLog(Mixin):
- from .. import logger
- log = logger
+ import logging
+ log = logging.getLogger(__name__)
class CanCherrypy(Mixin):
"""
from __future__ import absolute_import
-from .settings import Settings
-from .tools import build_url
import inspect
+import logging
import re
import requests
from requests.exceptions import ConnectionError, InvalidURL, Timeout
-from . import logger
+from .settings import Settings
+from .tools import build_url
try:
from requests.packages.urllib3.exceptions import SSLError
from urllib3.exceptions import SSLError
+logger = logging.getLogger('rest_client')
+
+
class TimeoutRequestsSession(requests.Session):
"""
Set timeout argument for all requests if this is not already done.
import errno
import json
+import logging
import threading
import time
import re
from mgr_module import CLIReadCommand, CLIWriteCommand
-from .. import mgr, logger
+from .. import mgr
from ..security import Scope, Permission
from ..exceptions import RoleAlreadyExists, RoleDoesNotExist, ScopeNotValid, \
PermissionNotValid, RoleIsAssociatedWithUser, \
RoleNotInUser
+logger = logging.getLogger('access_control')
+
+
# password hashing algorithm
def password_hash(password, salt_password=None):
if not password:
del self.roles[name]
def create_user(self, username, password, name, email, enabled=True):
- logger.debug("AC: creating user: username=%s", username)
+ logger.debug("creating user: username=%s", username)
with self.lock:
if username in self.users:
raise UserAlreadyExists(username)
return "{}{}".format(cls.ACDB_CONFIG_KEY, version)
def check_and_update_db(self):
- logger.debug("AC: Checking for previews DB versions")
+ logger.debug("Checking for previews DB versions")
def check_migrate_v0_to_current():
# check if there is username/password from previous version
username = mgr.get_module_option('username', None)
password = mgr.get_module_option('password', None)
if username and password:
- logger.debug("AC: Found single user credentials: user=%s", username)
+ logger.debug("Found single user credentials: user=%s", username)
# found user credentials
user = self.create_user(username, "", None, None)
# password is already hashed, so setting manually
# Check if version 1 exists in the DB and migrate it to current version
v1_db = mgr.get_store(self.accessdb_config_key(1))
if v1_db:
- logger.debug("AC: Found database v1 credentials")
+ logger.debug("Found database v1 credentials")
v1_db = json.loads(v1_db)
for user, _ in v1_db['users'].items():
@classmethod
def load(cls):
- logger.info("AC: Loading user roles DB version=%s", cls.VERSION)
+ logger.info("Loading user roles DB version=%s", cls.VERSION)
json_db = mgr.get_store(cls.accessdb_config_key())
if json_db is None:
- logger.debug("AC: No DB v%s found, creating new...", cls.VERSION)
+ logger.debug("No DB v%s found, creating new...", cls.VERSION)
db = cls(cls.VERSION, {}, {})
# check if we can update from a previous version database
db.check_and_update_db()
from base64 import b64encode
import json
+import logging
import os
import threading
import time
import jwt
from .access_control import LocalAuthenticator, UserDoesNotExist
-from .. import mgr, logger
+from .. import mgr
class JwtManager(object):
@classmethod
def init(cls):
+ cls.logger = logging.getLogger('jwt')
# generate a new secret if it does not exist
secret = mgr.get_store('jwt_secret')
if secret is None:
user = AuthManager.get_user(dtoken['username'])
if user.last_update <= dtoken['iat']:
return user
- logger.debug("AMT: user info changed after token was issued, iat=%s last_update=%s",
- dtoken['iat'], user.last_update)
+ cls.logger.debug("user info changed after token was issued, iat=%s last_update=%s",
+ dtoken['iat'], user.last_update)
else:
- logger.debug('AMT: Token is black-listed')
+ cls.logger.debug('Token is black-listed')
except jwt.exceptions.ExpiredSignatureError:
- logger.debug("AMT: Token has expired")
+ cls.logger.debug("Token has expired")
except jwt.exceptions.InvalidTokenError:
- logger.debug("AMT: Failed to decode token")
+ cls.logger.debug("Failed to decode token")
except UserDoesNotExist:
- logger.debug("AMT: Invalid token: user %s does not exist", dtoken['username'])
+ cls.logger.debug("Invalid token: user %s does not exist", dtoken['username'])
return None
@classmethod
def __init__(self):
super(AuthManagerTool, self).__init__(
'before_handler', self._check_authentication, priority=20)
+ self.logger = logging.getLogger('auth')
def _check_authentication(self):
JwtManager.reset_user()
token = JwtManager.get_token_from_header()
- logger.debug("AMT: token: %s", token)
+ self.logger.debug("token: %s", token)
if token:
user = JwtManager.get_user(token)
if user:
self._check_authorization(user.username)
return
- logger.debug('AMT: Unauthorized access to %s',
- cherrypy.url(relative='server'))
+ self.logger.debug('Unauthorized access to %s',
+ cherrypy.url(relative='server'))
raise cherrypy.HTTPError(401, 'You are not authorized to access '
'that resource')
def _check_authorization(self, username):
- logger.debug("AMT: checking authorization...")
+ self.logger.debug("checking authorization...")
username = username
handler = cherrypy.request.handler.callable
controller = handler.__self__
# controller does not define any authorization restrictions
return
- logger.debug("AMT: checking '%s' access to '%s' scope", sec_perms,
- sec_scope)
+ self.logger.debug("checking '%s' access to '%s' scope", sec_perms,
+ sec_scope)
if not sec_perms:
- logger.debug("Fail to check permission on: %s:%s", controller,
- handler)
+ self.logger.debug("Fail to check permission on: %s:%s", controller,
+ handler)
raise cherrypy.HTTPError(403, "You don't have permissions to "
"access that resource")
from __future__ import absolute_import
import json
+import logging
import rados
next(b, None)
return zip(a, b)
-from .. import logger, mgr
+from .. import mgr
try:
from typing import Dict, Any # pylint: disable=unused-import
pass # For typing only
+logger = logging.getLogger('ceph_service')
+
+
class SendCommandError(rados.Error):
def __init__(self, err, prefix, argdict, errno):
self.prefix = prefix
mgr.send_command(result, srv_type, srv_spec, json.dumps(argdict), "")
r, outb, outs = result.wait()
if r != 0:
- msg = "send_command '{}' failed. (r={}, outs=\"{}\", kwargs={})".format(prefix, r, outs,
- kwargs)
- logger.error(msg)
+ logger.error("send_command '%s' failed. (r=%s, outs=\"%s\", kwargs=%s)", prefix, r,
+ outs, kwargs)
raise SendCommandError(outs, prefix, argdict, r)
try:
from __future__ import absolute_import
from contextlib import contextmanager
+import logging
import datetime
import os
import six
import cephfs
-from .. import mgr, logger
+from .. import mgr
+
+
+logger = logging.getLogger('cephfs')
class CephFS(object):
return fs_info[0]['mdsmap']['fs_name']
def __init__(self, fs_name=None):
- logger.debug("[CephFS] initializing cephfs connection")
+ logger.debug("initializing cephfs connection")
self.cfs = cephfs.LibCephFS(rados_inst=mgr.rados)
- logger.debug("[CephFS] mounting cephfs filesystem: %s", fs_name)
+ logger.debug("mounting cephfs filesystem: %s", fs_name)
if fs_name:
self.cfs.mount(filesystem_name=fs_name)
else:
self.cfs.mount()
- logger.debug("[CephFS] mounted cephfs filesystem")
+ logger.debug("mounted cephfs filesystem")
def __del__(self):
- logger.debug("[CephFS] shutting down cephfs filesystem")
+ logger.debug("shutting down cephfs filesystem")
self.cfs.shutdown()
@contextmanager
"""
if isinstance(path, six.string_types):
path = path.encode()
- logger.debug("[CephFS] get_dir_list dir_path=%s level=%s",
- path, level)
+ logger.debug("get_dir_list dirpath=%s level=%s", path,
+ level)
if level == 0:
return [path]
- logger.debug("[CephFS] opening dir_path=%s", path)
+ logger.debug("opening dirpath=%s", path)
with self.opendir(path) as d:
dent = self.cfs.readdir(d)
paths = [path]
while dent:
- logger.debug("[CephFS] found entry=%s", dent.d_name)
+ logger.debug("found entry=%s", dent.d_name)
if dent.d_name in [b'.', b'..']:
dent = self.cfs.readdir(d)
continue
if dent.is_dir():
- logger.debug("[CephFS] found dir=%s", dent.d_name)
+ logger.debug("found dir=%s", dent.d_name)
subdir_path = os.path.join(path, dent.d_name)
paths.extend(self.ls_dir(subdir_path, level - 1))
dent = self.cfs.readdir(d)
raise Exception('Cannot create root directory "/"')
if self.dir_exists(path):
return
- logger.info("[CephFS] Creating directory: %s", path)
+ logger.info("Creating directory: %s", path)
self.cfs.mkdirs(path, 0o755)
def rm_dir(self, path):
raise Exception('Cannot remove root directory "/"')
if not self.dir_exists(path):
return
- logger.info("[CephFS] Removing directory: %s", path)
+ logger.info("Removing directory: %s", path)
self.cfs.rmdir(path)
def mk_snapshot(self, path, name=None, mode=0o755):
name = now.replace(tzinfo=tz).isoformat('T')
client_snapdir = self.cfs.conf_get('client_snapdir')
snapshot_path = os.path.join(path, client_snapdir, name)
- logger.info("[CephFS] Creating snapshot: %s", snapshot_path)
+ logger.info("Creating snapshot: %s", snapshot_path)
self.cfs.mkdir(snapshot_path, mode)
return name
"""
client_snapdir = self.cfs.conf_get('client_snapdir')
snapshot_path = os.path.join(path, client_snapdir, name)
- logger.info("[CephFS] Removing snapshot: %s", snapshot_path)
+ logger.info("Removing snapshot: %s", snapshot_path)
self.cfs.rmdir(snapshot_path)
def get_quotas(self, path):
import json
from contextlib import contextmanager
+import logging
import six
import cherrypy
import rbd
import rados
-from .. import logger
from ..services.ceph_service import SendCommandError
from ..exceptions import ViewCacheNoDataException, DashboardException
from ..tools import wraps
+
+logger = logging.getLogger('exception')
+
+
if six.PY2:
# Monkey-patch a __call__ method into @contextmanager to make
# it compatible to Python 3
- from contextlib import GeneratorContextManager # pylint: disable=no-name-in-module
+ # pylint: disable=no-name-in-module,ungrouped-imports
+ from contextlib import GeneratorContextManager
def init(self, *args):
if len(args) == 1:
return handler(*args, **kwargs)
# Don't catch cherrypy.* Exceptions.
except (ViewCacheNoDataException, DashboardException) as e:
- logger.exception('dashboard_exception_handler')
+ logger.exception('Dashboard Exception')
cherrypy.response.headers['Content-Type'] = 'application/json'
cherrypy.response.status = getattr(e, 'status', 400)
return json.dumps(serialize_dashboard_exception(e)).encode('utf-8')
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import logging
import re
from orchestrator import OrchestratorError
from .cephx import CephX
from .orchestrator import OrchClient
from .rgw_client import RgwClient, RequestException, NoCredentialsException
-from .. import mgr, logger
+from .. import mgr
from ..settings import Settings
from ..exceptions import DashboardException
+logger = logging.getLogger('ganesha')
+
+
class NFSException(DashboardException):
def __init__(self, msg):
super(NFSException, self).__init__(component="nfs", msg=msg)
@classmethod
def reload_daemons(cls, cluster_id, daemons_id):
- logger.debug("[NFS] issued reload of daemons: %s", daemons_id)
+ logger.debug("issued reload of daemons: %s", daemons_id)
if not OrchClient.instance().available():
- logger.debug("[NFS] orchestrator not available")
+ logger.debug("orchestrator not available")
return
reload_list = []
daemons = cls.get_daemons_status()
@classmethod
def from_export_block(cls, export_block, cluster_id, defaults):
- logger.debug("[NFS] parsing export block: %s", export_block)
+ logger.debug("parsing export block: %s", export_block)
fsal_block = [b for b in export_block['_blocks_']
if b['block_name'] == "FSAL"]
size, _ = obj.stat()
raw_config = obj.read(size)
raw_config = raw_config.decode("utf-8")
- logger.debug("[NFS] read export configuration from rados "
+ logger.debug("read export configuration from rados "
"object %s/%s/%s:\n%s", self.rados_pool,
self.rados_namespace, obj.key, raw_config)
self.export_conf_blocks.extend(
size, _ = obj.stat()
raw_config = obj.read(size)
raw_config = raw_config.decode("utf-8")
- logger.debug("[NFS] read daemon configuration from rados "
+ logger.debug("read daemon configuration from rados "
"object %s/%s/%s:\n%s", self.rados_pool,
self.rados_namespace, obj.key, raw_config)
idx = obj.key.find('-')
ioctx.set_namespace(self.rados_namespace)
ioctx.write_full(obj, raw_config.encode('utf-8'))
logger.debug(
- "[NFS] write configuration into rados object %s/%s/%s:\n%s",
+ "write configuration into rados object %s/%s/%s:\n%s",
self.rados_pool, self.rados_namespace, obj, raw_config)
@classmethod
if squash.lower() in ["all", "all_squash", "allsquash",
"all_anonymous", "allanonymous"]:
return "all_squash"
- logger.error("[NFS] could not parse squash value: %s", squash)
+ logger.error("could not parse squash value: %s", squash)
raise NFSException("'{}' is an invalid squash option".format(squash))
@classmethod
return 3
if str(protocol) in ["NFSV4", "4", "V4", "NFS4"]:
return 4
- logger.error("[NFS] could not parse protocol value: %s", protocol)
+ logger.error("could not parse protocol value: %s", protocol)
raise NFSException("'{}' is an invalid NFS protocol version identifier"
.format(protocol))
from __future__ import absolute_import
import json
+import logging
from requests.auth import HTTPBasicAuth
from urllib.parse import urlparse
from .iscsi_config import IscsiGatewaysConfig # pylint: disable=cyclic-import
-from .. import logger
from ..settings import Settings
from ..rest_client import RestClient
+logger = logging.getLogger('iscsi_client')
+
+
class IscsiClient(RestClient):
_CLIENT_NAME = 'iscsi'
_instances = {}
@RestClient.api_put('/api/target/{target_iqn}')
def create_target(self, target_iqn, target_controls, request=None):
- logger.debug("iSCSI[%s] Creating target: %s", self.gateway_name, target_iqn)
+ logger.debug("[%s] Creating target: %s", self.gateway_name, target_iqn)
return request({
'controls': json.dumps(target_controls)
})
@RestClient.api_delete('/api/target/{target_iqn}')
def delete_target(self, target_iqn, request=None):
- logger.debug("iSCSI[%s] Deleting target: %s", self.gateway_name, target_iqn)
+ logger.debug("[%s] Deleting target: %s", self.gateway_name, target_iqn)
return request()
@RestClient.api_put('/api/target/{target_iqn}')
def reconfigure_target(self, target_iqn, target_controls, request=None):
- logger.debug("iSCSI[%s] Reconfiguring target: %s", self.gateway_name, target_iqn)
+ logger.debug("[%s] Reconfiguring target: %s", self.gateway_name, target_iqn)
return request({
'mode': 'reconfigure',
'controls': json.dumps(target_controls)
@RestClient.api_put('/api/gateway/{target_iqn}/{gateway_name}')
def create_gateway(self, target_iqn, gateway_name, ip_address, request=None):
- logger.debug("iSCSI[%s] Creating gateway: %s/%s", self.gateway_name, target_iqn,
+ logger.debug("[%s] Creating gateway: %s/%s", self.gateway_name, target_iqn,
gateway_name)
return request({
'ip_address': ','.join(ip_address),
@RestClient.api_delete('/api/gateway/{target_iqn}/{gateway_name}')
def delete_gateway(self, target_iqn, gateway_name, request=None):
- logger.debug("iSCSI: Deleting gateway: %s/%s", target_iqn, gateway_name)
+ logger.debug("Deleting gateway: %s/%s", target_iqn, gateway_name)
return request()
@RestClient.api_put('/api/disk/{pool}/{image}')
def create_disk(self, pool, image, backstore, wwn, request=None):
- logger.debug("iSCSI[%s] Creating disk: %s/%s", self.gateway_name, pool, image)
+ logger.debug("[%s] Creating disk: %s/%s", self.gateway_name, pool, image)
return request({
'mode': 'create',
'backstore': backstore,
@RestClient.api_delete('/api/disk/{pool}/{image}')
def delete_disk(self, pool, image, request=None):
- logger.debug("iSCSI[%s] Deleting disk: %s/%s", self.gateway_name, pool, image)
+ logger.debug("[%s] Deleting disk: %s/%s", self.gateway_name, pool, image)
return request({
'preserve_image': 'true'
})
@RestClient.api_put('/api/disk/{pool}/{image}')
def reconfigure_disk(self, pool, image, controls, request=None):
- logger.debug("iSCSI[%s] Reconfiguring disk: %s/%s", self.gateway_name, pool, image)
+ logger.debug("[%s] Reconfiguring disk: %s/%s", self.gateway_name, pool, image)
return request({
'controls': json.dumps(controls),
'mode': 'reconfigure'
@RestClient.api_put('/api/targetlun/{target_iqn}')
def create_target_lun(self, target_iqn, image_id, lun, request=None):
- logger.debug("iSCSI[%s] Creating target lun: %s/%s", self.gateway_name, target_iqn,
+ logger.debug("[%s] Creating target lun: %s/%s", self.gateway_name, target_iqn,
image_id)
return request({
'disk': image_id,
@RestClient.api_delete('/api/targetlun/{target_iqn}')
def delete_target_lun(self, target_iqn, image_id, request=None):
- logger.debug("iSCSI[%s] Deleting target lun: %s/%s", self.gateway_name, target_iqn,
+ logger.debug("[%s] Deleting target lun: %s/%s", self.gateway_name, target_iqn,
image_id)
return request({
'disk': image_id
@RestClient.api_put('/api/client/{target_iqn}/{client_iqn}')
def create_client(self, target_iqn, client_iqn, request=None):
- logger.debug("iSCSI[%s] Creating client: %s/%s", self.gateway_name, target_iqn, client_iqn)
+ logger.debug("[%s] Creating client: %s/%s", self.gateway_name, target_iqn, client_iqn)
return request()
@RestClient.api_delete('/api/client/{target_iqn}/{client_iqn}')
def delete_client(self, target_iqn, client_iqn, request=None):
- logger.debug("iSCSI[%s] Deleting client: %s/%s", self.gateway_name, target_iqn, client_iqn)
+ logger.debug("[%s] Deleting client: %s/%s", self.gateway_name, target_iqn, client_iqn)
return request()
@RestClient.api_put('/api/clientlun/{target_iqn}/{client_iqn}')
def create_client_lun(self, target_iqn, client_iqn, image_id, request=None):
- logger.debug("iSCSI[%s] Creating client lun: %s/%s", self.gateway_name, target_iqn,
+ logger.debug("[%s] Creating client lun: %s/%s", self.gateway_name, target_iqn,
client_iqn)
return request({
'disk': image_id
@RestClient.api_put('/api/clientauth/{target_iqn}/{client_iqn}')
def create_client_auth(self, target_iqn, client_iqn, username, password, mutual_username,
mutual_password, request=None):
- logger.debug("iSCSI[%s] Creating client auth: %s/%s/%s/%s/%s/%s",
+ logger.debug("[%s] Creating client auth: %s/%s/%s/%s/%s/%s",
self.gateway_name, target_iqn, client_iqn, username, password, mutual_username,
mutual_password)
return request({
@RestClient.api_put('/api/hostgroup/{target_iqn}/{group_name}')
def create_group(self, target_iqn, group_name, members, image_ids, request=None):
- logger.debug("iSCSI[%s] Creating group: %s/%s", self.gateway_name, target_iqn, group_name)
+ logger.debug("[%s] Creating group: %s/%s", self.gateway_name, target_iqn, group_name)
return request({
'members': ','.join(members),
'disks': ','.join(image_ids)
@RestClient.api_delete('/api/hostgroup/{target_iqn}/{group_name}')
def delete_group(self, target_iqn, group_name, request=None):
- logger.debug("iSCSI[%s] Deleting group: %s/%s", self.gateway_name, target_iqn, group_name)
+ logger.debug("[%s] Deleting group: %s/%s", self.gateway_name, target_iqn, group_name)
return request()
@RestClient.api_put('/api/discoveryauth')
def update_discoveryauth(self, user, password, mutual_user, mutual_password, request=None):
- logger.debug("iSCSI[%s] Updating discoveryauth: %s/%s/%s/%s", self.gateway_name, user,
+ logger.debug("[%s] Updating discoveryauth: %s/%s/%s/%s", self.gateway_name, user,
password, mutual_user, mutual_password)
return request({
'username': user,
@RestClient.api_put('/api/targetauth/{target_iqn}')
def update_targetacl(self, target_iqn, action, request=None):
- logger.debug("iSCSI[%s] Updating targetacl: %s/%s", self.gateway_name, target_iqn, action)
+ logger.debug("[%s] Updating targetacl: %s/%s", self.gateway_name, target_iqn, action)
return request({
'action': action
})
@RestClient.api_put('/api/targetauth/{target_iqn}')
def update_targetauth(self, target_iqn, user, password, mutual_user, mutual_password,
request=None):
- logger.debug("iSCSI[%s] Updating targetauth: %s/%s/%s/%s/%s", self.gateway_name,
+ logger.debug("[%s] Updating targetauth: %s/%s/%s/%s/%s", self.gateway_name,
target_iqn, user, password, mutual_user, mutual_password)
return request({
'username': user,
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import logging
+
from orchestrator import InventoryFilter
from orchestrator import OrchestratorClientMixin, raise_if_exception, OrchestratorError
-from .. import mgr, logger
+from .. import mgr
from ..tools import wraps
+logger = logging.getLogger('orchestrator')
+
+
# pylint: disable=abstract-method
class OrchestratorAPI(OrchestratorClientMixin):
def __init__(self):
def status(self):
try:
status, desc = super(OrchestratorAPI, self).available()
- logger.info("[ORCH] is orchestrator available: %s, %s", status, desc)
+ logger.info("is orchestrator available: %s, %s", status, desc)
return dict(available=status, description=desc)
except (RuntimeError, OrchestratorError, ImportError):
return dict(available=False,
from __future__ import absolute_import
from datetime import datetime
+import logging
-from .. import mgr, logger
from . import rbd
+from .. import mgr
+
+
+logger = logging.getLogger('progress')
def _progress_event_to_dashboard_task_common(event, task):
progress_events = mgr.remote('progress', "_json")
for ev in progress_events['events']:
- logger.debug("[Progress] event=%s", ev)
+ logger.debug("event=%s", ev)
executing_t.append(_progress_event_to_dashboard_task(ev))
for ev in progress_events['completed']:
- logger.debug("[Progress] finished event=%s", ev)
+ logger.debug("finished event=%s", ev)
finished_t.append(_progress_event_to_dashboard_task(ev, True))
return executing_t, finished_t
from __future__ import absolute_import
import re
+import logging
import ipaddress
from distutils.util import strtobool
import xml.etree.ElementTree as ET # noqa: N814
from ..settings import Settings, Options
from ..rest_client import RestClient, RequestException
from ..tools import build_url, dict_contains_path, json_str_to_object, partial_dict
-from .. import mgr, logger
+from .. import mgr
try:
from typing import Any, Dict, List # pylint: disable=unused-import
pass # For typing only
+logger = logging.getLogger('rgw_client')
+
+
class NoCredentialsException(RequestException):
def __init__(self):
super(NoCredentialsException, self).__init__(
import os
import errno
import json
+import logging
import threading
import warnings
import six
from six.moves.urllib import parse
-from .. import mgr, logger
+from .. import mgr
from ..tools import prepare_url_prefix
if six.PY2:
FileNotFoundError = IOError # pylint: disable=redefined-builtin
+
+logger = logging.getLogger('sso')
+
try:
from onelogin.saml2.settings import OneLogin_Saml2_Settings as Saml2Settings
from onelogin.saml2.errors import OneLogin_Saml2_Error as Saml2Error
return "{}{}".format(cls.SSODB_CONFIG_KEY, version)
def check_and_update_db(self):
- logger.debug("SSO: Checking for previews DB versions")
+ logger.debug("Checking for previous DB versions")
if self.VERSION != 1:
raise NotImplementedError()
@classmethod
def load(cls):
- logger.info("SSO: Loading SSO DB version=%s", cls.VERSION)
+ logger.info("Loading SSO DB version=%s", cls.VERSION)
json_db = mgr.get_store(cls.ssodb_config_key(), None)
if json_db is None:
- logger.debug("SSO: No DB v%s found, creating new...", cls.VERSION)
+ logger.debug("No DB v%s found, creating new...", cls.VERSION)
db = cls(cls.VERSION, '', Saml2({}))
# check if we can update from a previous version database
db.check_and_update_db()
from __future__ import absolute_import
import json
+import logging
import threading
import time
from mgr_module import CLICommand
-from .. import logger, mgr
+from .. import mgr
from ..controllers import json_error_page, generate_controller_routes
from ..services.auth import AuthManagerTool
from ..services.exception import dashboard_exception_handler
PLUGIN_MANAGER.hook.register_commands()
+logger = logging.getLogger('tests')
+
+
class CmdException(Exception):
def __init__(self, retcode, message):
super(CmdException, self).__init__(message)
import json
import functools
import ipaddress
+import logging
import collections
from datetime import datetime, timedelta
except ImportError:
from urllib.parse import urljoin
-from . import logger, mgr
+from . import mgr
from .exceptions import ViewCacheNoDataException
from .settings import Settings
from .services.auth import JwtManager
def __init__(self):
cherrypy.Tool.__init__(self, 'before_handler', self.request_begin,
priority=10)
+ self.logger = logging.getLogger('request')
def _setup(self):
cherrypy.Tool._setup(self)
req = cherrypy.request
user = JwtManager.get_username()
# Log the request.
- logger.debug('[%s:%s] [%s] [%s] %s', req.remote.ip, req.remote.port,
- req.method, user, req.path_info)
+ self.logger.debug('[%s:%s] [%s] [%s] %s', req.remote.ip, req.remote.port,
+ req.method, user, req.path_info)
# Audit the request.
if Settings.AUDIT_API_ENABLED and req.method not in ['GET']:
url = build_url(req.remote.ip, scheme=req.scheme,
mgr.cluster_log('audit', mgr.CLUSTER_LOG_PRIO_INFO, msg)
def request_error(self):
- self._request_log(logger.error)
- logger.error(cherrypy.response.body)
+ self._request_log(self.logger.error)
+ self.logger.error(cherrypy.response.body)
def request_end(self):
status = cherrypy.response.status[:3]
if status in ["401"]:
# log unauthorized accesses
- self._request_log(logger.warning)
+ self._request_log(self.logger.warning)
else:
- self._request_log(logger.info)
+ self._request_log(self.logger.info)
def _format_bytes(self, num):
units = ['B', 'K', 'M', 'G']
t1 = 0.0
try:
t0 = time.time()
- logger.debug("VC: starting execution of %s", self.fn)
+ self._view.logger.debug("starting execution of %s", self.fn)
val = self.fn(*self.args, **self.kwargs)
t1 = time.time()
except Exception as ex:
with self._view.lock:
- logger.exception("Error while calling fn=%s ex=%s", self.fn,
- str(ex))
+ self._view.logger.exception("Error while calling fn=%s ex=%s", self.fn,
+ str(ex))
self._view.value = None
self._view.value_when = None
self._view.getter_thread = None
self._view.getter_thread = None
self._view.exception = None
- logger.debug("VC: execution of %s finished in: %s", self.fn,
- t1 - t0)
+ self._view.logger.debug("execution of %s finished in: %s", self.fn,
+ t1 - t0)
self.event.set()
class RemoteViewCache(object):
self.latency = 0
self.exception = None
self.lock = threading.Lock()
+ self.logger = logging.getLogger('viewcache')
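+ # GetterThread workers log through this instance via self._view.logger.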
def reset(self):
with self.lock:
kwargs)
self.getter_thread.start()
else:
- logger.debug("VC: getter_thread still alive for: %s", fn)
+ self.logger.debug("getter_thread still alive for: %s", fn)
ev = self.getter_thread.event
return
cls._running = True
cls._instance = NotificationQueue()
- logger.debug("starting notification queue")
+ cls.logger = logging.getLogger('notification_queue')
+ cls.logger.debug("starting notification queue")
cls._instance.start()
@classmethod
cls._running = False
with cls._cond:
cls._cond.notify()
- logger.debug("waiting for notification queue to finish")
+ cls.logger.debug("waiting for notification queue to finish")
instance.join()
- logger.debug("notification queue stopped")
+ cls.logger.debug("notification queue stopped")
@classmethod
def _registered_handler(cls, func, n_types):
for ev_type in n_types:
if not cls._registered_handler(func, ev_type):
cls._listeners[ev_type].add((priority, func))
- logger.debug("NQ: function %s was registered for events of"
- " type %s", func, ev_type)
+ cls.logger.debug("function %s was registered for events of"
+ " type %s", func, ev_type)
@classmethod
def deregister(cls, func, n_types=None):
break
if to_remove:
listeners.discard(to_remove)
- logger.debug("NQ: function %s was deregistered for events "
- "of type %s", func, ev_type)
+ cls.logger.debug("function %s was deregistered for events "
+ "of type %s", func, ev_type)
@classmethod
def new_notification(cls, notify_type, notify_value):
listener[1](notify_value)
def run(self):
- logger.debug("notification queue started")
+ self.logger.debug("notification queue started")
while self._running:
private_buffer = []
- logger.debug("NQ: processing queue: %s", len(self._queue))
+ self.logger.debug("processing queue: %s", len(self._queue))
try:
while True:
private_buffer.append(self._queue.popleft())
while self._running and not self._queue:
self._cond.wait()
# flush remaining events
- logger.debug("NQ: flush remaining events: %s", len(self._queue))
+ self.logger.debug("flush remaining events: %s", len(self._queue))
self._notify_listeners(self._queue)
self._queue.clear()
- logger.debug("notification queue finished")
+ self.logger.debug("notification queue finished")
# pylint: disable=too-many-arguments, protected-access
@classmethod
def init(cls):
+ cls.logger = logging.getLogger('taskmgr')
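# _handle_finished_task() below moves a completed task from the executing set to the finished list.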
NotificationQueue.register(cls._handle_finished_task, 'cd_task_finished')
@classmethod
def _handle_finished_task(cls, task):
- logger.info("TM: finished %s", task)
+ cls.logger.info("finished %s", task)
with cls._lock:
cls._executing_tasks.remove(task)
cls._finished_tasks.append(task)
exception_handler)
with cls._lock:
if task in cls._executing_tasks:
- logger.debug("TM: task already executing: %s", task)
+ cls.logger.debug("task already executing: %s", task)
for t in cls._executing_tasks:
if t == task:
return t
- logger.debug("TM: created %s", task)
+ cls.logger.debug("created %s", task)
cls._executing_tasks.add(task)
- logger.info("TM: running %s", task)
+ cls.logger.info("running %s", task)
task._run()
return task
# pylint: disable=protected-access
class TaskExecutor(object):
def __init__(self):
+ self.logger = logging.getLogger('taskexec')
self.task = None
def init(self, task):
# pylint: disable=broad-except
def start(self):
- logger.debug("EX: executing task %s", self.task)
+ self.logger.debug("executing task %s", self.task)
try:
self.task.fn(*self.task.fn_args, **self.task.fn_kwargs)
except Exception as ex:
- logger.exception("Error while calling %s", self.task)
+ self.logger.exception("Error while calling %s", self.task)
self.finish(None, ex)
def finish(self, ret_value, exception):
if not exception:
- logger.debug("EX: successfully finished task: %s", self.task)
+ self.logger.debug("successfully finished task: %s", self.task)
else:
- logger.debug("EX: task finished with exception: %s", self.task)
+ self.logger.debug("task finished with exception: %s", self.task)
self.task._complete(ret_value, exception)
def _run(self):
TaskManager._task_local_data.task = self.task
try:
- logger.debug("TEX: executing task %s", self.task)
+ self.logger.debug("executing task %s", self.task)
val = self.task.fn(*self.task.fn_args, **self.task.fn_kwargs)
except Exception as ex:
- logger.exception("Error while calling %s", self.task)
+ self.logger.exception("Error while calling %s", self.task)
self.finish(None, ex)
else:
self.finish(val, None)
self.end_time = None
self.duration = 0
self.exception = None
+ self.logger = logging.getLogger('task')
self.lock = threading.Lock()
def __hash__(self):
if not self.exception:
self.set_progress(100, True)
NotificationQueue.new_notification('cd_task_finished', self)
- logger.debug("TK: execution of %s finished in: %s s", self,
- self.duration)
+ self.logger.debug("execution of %s finished in: %s s", self,
+ self.duration)
def _handle_task_finished(self, task):
if self == task: