git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
node-proxy: logging refactor
author     Guillaume Abrioux <gabrioux@ibm.com>
           Tue, 19 Sep 2023 07:46:42 +0000 (07:46 +0000)
committer  Guillaume Abrioux <gabrioux@ibm.com>
           Thu, 25 Jan 2024 14:43:30 +0000 (14:43 +0000)
This makes `log` an instance attribute so that `Logger` is no longer
instantiated at module level, outside of the classes that use it.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
src/cephadm/cephadmlib/node_proxy/redfish_dell.py
src/cephadm/cephadmlib/node_proxy/redfish_system.py
src/cephadm/cephadmlib/node_proxy/reporter.py
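
All three diffs below apply the same mechanical change. As a minimal,
self-contained sketch of the before/after pattern (the `Logger` class here is
a hypothetical stand-in; the real one lives in `cephadmlib/node_proxy/util.py`
and, judging by the call sites, exposes a stdlib logger as its `.logger`
attribute):

    import logging
    from typing import Any

    class Logger:
        # Hypothetical stand-in for util.Logger, inferred from the
        # `log.logger.info(...)` call sites in the diffs below.
        def __init__(self, name: str) -> None:
            self.logger = logging.getLogger(name)

    # Before: one module-level logger shared by every class in the file.
    log = Logger(__name__)

    # After: each instance creates its own wrapper in __init__.
    class RedfishDell:
        def __init__(self, **kw: Any) -> None:
            self.log = Logger(__name__)

        def _update_network(self) -> None:
            self.log.logger.info("Updating network")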

index f1cd6f6509be1fbccc663f9c0aa7c34f86b8085e..796c989f8834d67b131d9f630ec38b3ce3a52e19 100644 (file)
@@ -2,18 +2,17 @@ from .redfish_system import RedfishSystem
 from .util import Logger, normalize_dict, to_snake_case
 from typing import Dict, Any
 
-log = Logger(__name__)
-
 
 class RedfishDell(RedfishSystem):
     def __init__(self, **kw: Any) -> None:
+        self.log = Logger(__name__)
         if kw.get('system_endpoint') is None:
             kw['system_endpoint'] = '/Systems/System.Embedded.1'
         super().__init__(**kw)
 
     def _update_network(self) -> None:
         fields = ['Description', 'Name', 'SpeedMbps', 'Status']
-        log.logger.info("Updating network")
+        self.log.logger.info("Updating network")
         self._system['network'] = self.build_data(fields, 'EthernetInterfaces')
 
     def _update_processors(self) -> None:
@@ -24,7 +23,7 @@ class RedfishDell(RedfishSystem):
                   'Model',
                   'Status',
                   'Manufacturer']
-        log.logger.info("Updating processors")
+        self.log.logger.info("Updating processors")
         self._system['processors'] = self.build_data(fields, 'Processors')
 
     def _update_storage(self) -> None:
@@ -34,7 +33,7 @@ class RedfishDell(RedfishSystem):
                   'SerialNumber', 'Status',
                   'PhysicalLocation']
         entities = self.get_members('Storage')
-        log.logger.info("Updating storage")
+        self.log.logger.info("Updating storage")
         result: Dict[str, Dict[str, Dict]] = dict()
         for entity in entities:
             for drive in entity['Drives']:
@@ -48,7 +47,7 @@ class RedfishDell(RedfishSystem):
         self._system['storage'] = normalize_dict(result)
 
     def _update_metadata(self) -> None:
-        log.logger.info("Updating metadata")
+        self.log.logger.info("Updating metadata")
         pass
 
     def _update_memory(self) -> None:
@@ -56,9 +55,9 @@ class RedfishDell(RedfishSystem):
                   'MemoryDeviceType',
                   'CapacityMiB',
                   'Status']
-        log.logger.info("Updating memory")
+        self.log.logger.info("Updating memory")
         self._system['memory'] = self.build_data(fields, 'Memory')
 
     def _update_power(self) -> None:
-        log.logger.info("Updating power")
+        self.log.logger.info("Updating power")
         pass
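
A side note on cost: assuming `util.Logger` delegates to the stdlib's
`logging.getLogger` (an assumption, since its source is not part of this
diff), creating a wrapper per instance is cheap, because stdlib loggers are
cached per name:

    import logging

    # getLogger returns the same cached object for a given name, so a
    # per-instance wrapper does not multiply loggers or handlers.
    a = logging.getLogger("cephadmlib.node_proxy.redfish_dell")
    b = logging.getLogger("cephadmlib.node_proxy.redfish_dell")
    assert a is b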
index bb8286b127e1ad29c1b683ca27b01f741b4ee7ef..ad556ab8dea96bbdb40185e70bc230cd71a51e55 100644 (file)
@@ -5,18 +5,17 @@ from time import sleep
 from .util import Logger, retry, normalize_dict, to_snake_case
 from typing import Dict, Any, List
 
-log = Logger(__name__)
-
 
 class RedfishSystem(BaseSystem):
     def __init__(self, **kw: Any) -> None:
         super().__init__(**kw)
+        self.log = Logger(__name__)
         self.host: str = kw['host']
         self.username: str = kw['username']
         self.password: str = kw['password']
         self.system_endpoint = kw.get('system_endpoint', '/Systems/1')
-        log.logger.info(f"redfish system initialization, host: {self.host}, user: {self.username}")
         self.client = RedFishClient(self.host, self.username, self.password)
+        self.log.logger.info(f"redfish system initialization, host: {self.host}, user: {self.username}")
 
         self._system: Dict[str, Dict[str, Any]] = {}
         self.run: bool = False
@@ -30,7 +29,7 @@ class RedfishSystem(BaseSystem):
     def _get_path(self, path: str) -> Dict:
         result = self.client.get_path(path)
         if result is None:
-            log.logger.error(f"The client reported an error when getting path: {path}")
+            self.log.logger.error(f"The client reported an error when getting path: {path}")
             raise RuntimeError(f"Could not get path: {path}")
         return result
 
@@ -50,7 +49,7 @@ class RedfishSystem(BaseSystem):
                 try:
                     result[member_id][to_snake_case(field)] = member_info[field]
                 except KeyError:
-                    log.logger.warning(f"Could not find field: {field} in member_info: {member_info}")
+                    self.log.logger.warning(f"Could not find field: {field} in member_info: {member_info}")
 
         return normalize_dict(result)
 
@@ -125,9 +124,9 @@ class RedfishSystem(BaseSystem):
         #  - caching logic
         try:
             while self.run:
-                log.logger.debug("waiting for a lock.")
+                self.log.logger.debug("waiting for a lock.")
                 self.lock.acquire()
-                log.logger.debug("lock acquired.")
+                self.log.logger.debug("lock acquired.")
                 try:
                     self._update_system()
                     # following calls in theory can be done in parallel
@@ -141,21 +140,21 @@ class RedfishSystem(BaseSystem):
                     sleep(5)
                 finally:
                     self.lock.release()
-                    log.logger.debug("lock released.")
+                    self.log.logger.debug("lock released.")
         # Catching 'Exception' is probably not a good idea (devel only)
         except Exception as e:
-            log.logger.error(f"Error detected, logging out from redfish api.\n{e}")
+            self.log.logger.error(f"Error detected, logging out from redfish api.\n{e}")
             self.client.logout()
             raise
 
     def flush(self) -> None:
-        log.logger.info("Acquiring lock to flush data.")
+        self.log.logger.info("Acquiring lock to flush data.")
         self.lock.acquire()
-        log.logger.info("Lock acquired, flushing data.")
+        self.log.logger.info("Lock acquired, flushing data.")
         self._system = {}
         self.previous_data = {}
-        log.logger.info("Data flushed.")
+        self.log.logger.info("Data flushed.")
         self.data_ready = False
-        log.logger.info("Data marked as not ready.")
+        self.log.logger.info("Data marked as not ready.")
         self.lock.release()
-        log.logger.info("Lock released.")
+        self.log.logger.info("Lock released.")
index 380884dc5b40f642e103f92efbf54be6ae3cc16f..79de9509ac86f70f4593863e61e7b7126363506c 100644 (file)
@@ -4,8 +4,6 @@ import time
 from .util import Logger
 from typing import Dict, Any
 
-log = Logger(__name__)
-
 
 class Reporter:
     def __init__(self, system: Any, data: Dict[str, Any], observer_url: str) -> None:
@@ -13,6 +11,8 @@ class Reporter:
         self.observer_url = observer_url
         self.finish = False
         self.data = data
+        self.log = Logger(__name__)
+        self.log.logger.info(f'Observer url set to {self.observer_url}')
 
     def stop(self) -> None:
         self.finish = True
@@ -29,27 +29,27 @@ class Reporter:
             # scenario probably we should just send the sub-parts
             # that have changed to minimize the traffic in
             # dense clusters
-            log.logger.debug("waiting for a lock.")
+            self.log.logger.debug("waiting for a lock.")
             self.system.lock.acquire()
-            log.logger.debug("lock acquired.")
+            self.log.logger.debug("lock acquired.")
             if self.system.data_ready:
-                log.logger.info('data ready to be sent to the mgr.')
+                self.log.logger.info('data ready to be sent to the mgr.')
                 if not self.system.get_system() == self.system.previous_data:
-                    log.logger.info('data has changed since last iteration.')
+                    self.log.logger.info('data has changed since last iteration.')
                     self.data['data'] = self.system.get_system()
                     try:
                         # TODO: add a timeout parameter to the reporter in the config file
-                        log.logger.info(f"sending data to {self.observer_url}")
+                        self.log.logger.info(f"sending data to {self.observer_url}")
                         r = requests.post(f"{self.observer_url}", json=self.data, timeout=5, verify=False)
                     except (requests.exceptions.RequestException,
                             requests.exceptions.ConnectionError) as e:
-                        log.logger.error(f"The reporter couldn't send data to the mgr: {e}")
+                        self.log.logger.error(f"The reporter couldn't send data to the mgr: {e}")
                         # Need to add a new parameter 'max_retries' to the reporter if it can't
                         # send the data for more than x times, maybe the daemon should stop altogether
                     else:
                         self.system.previous_data = self.system.get_system()
                 else:
-                    log.logger.info('no diff, not sending data to the mgr.')
+                    self.log.logger.info('no diff, not sending data to the mgr.')
             self.system.lock.release()
-            log.logger.debug("lock released.")
+            self.log.logger.debug("lock released.")
             time.sleep(5)
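
The heart of this loop is a diff-then-send pattern: POST only when the
current snapshot differs from the last one the mgr accepted, and keep the old
baseline on failure so the next tick retries. A standalone sketch of that
pattern (the URL and snapshot callable are placeholders, not cephadm API;
`ConnectionError` is a subclass of `RequestException`, so one except clause
covers both cases caught above):

    import time
    import requests

    def report_loop(get_snapshot, observer_url: str) -> None:
        # Sketch of the Reporter's diff-then-send pattern.
        previous = None
        while True:
            current = get_snapshot()
            if current != previous:
                try:
                    requests.post(observer_url, json={'data': current},
                                  timeout=5, verify=False)
                except requests.exceptions.RequestException:
                    pass  # baseline unchanged, so the next tick retries
                else:
                    previous = current  # mgr has this snapshot now
            time.sleep(5)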