from .util import Logger, normalize_dict, to_snake_case
from typing import Dict, Any
-log = Logger(__name__)
-
class RedfishDell(RedfishSystem):
def __init__(self, **kw: Any) -> None:
if kw.get('system_endpoint') is None:
    kw['system_endpoint'] = '/Systems/System.Embedded.1'
super().__init__(**kw)
+ self.log = Logger(__name__)
def _update_network(self) -> None:
fields = ['Description', 'Name', 'SpeedMbps', 'Status']
- log.logger.info("Updating network")
+ self.log.logger.info("Updating network")
self._system['network'] = self.build_data(fields, 'EthernetInterfaces')
def _update_processors(self) -> None:
'Model',
'Status',
'Manufacturer']
- log.logger.info("Updating processors")
+ self.log.logger.info("Updating processors")
self._system['processors'] = self.build_data(fields, 'Processors')
def _update_storage(self) -> None:
'SerialNumber', 'Status',
'PhysicalLocation']
entities = self.get_members('Storage')
- log.logger.info("Updating storage")
+ self.log.logger.info("Updating storage")
result: Dict[str, Dict[str, Dict]] = dict()
for entity in entities:
for drive in entity['Drives']:
self._system['storage'] = normalize_dict(result)
def _update_metadata(self) -> None:
- log.logger.info("Updating metadata")
+ self.log.logger.info("Updating metadata")
pass
def _update_memory(self) -> None:
'MemoryDeviceType',
'CapacityMiB',
'Status']
- log.logger.info("Updating memory")
+ self.log.logger.info("Updating memory")
self._system['memory'] = self.build_data(fields, 'Memory')
def _update_power(self) -> None:
- log.logger.info("Updating power")
+ self.log.logger.info("Updating power")
pass
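
# The Logger helper used above is imported from .util and is not shown in this
# change. A minimal sketch of what such a wrapper might look like, assuming it
# simply hands out a stdlib logger behind a .logger attribute (which is why the
# call sites read self.log.logger.info(...)); the real helper may configure
# handlers, levels and formats differently:
import logging

class Logger:
    def __init__(self, name: str, level: int = logging.INFO) -> None:
        # plain wrapper around logging.getLogger(); kept minimal on purpose
        self.logger = logging.getLogger(name)
        self.logger.setLevel(level)
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
            self.logger.addHandler(handler)
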
from .util import Logger, retry, normalize_dict, to_snake_case
from typing import Dict, Any, List
-log = Logger(__name__)
-
class RedfishSystem(BaseSystem):
def __init__(self, **kw: Any) -> None:
super().__init__(**kw)
+ self.log = Logger(__name__)
self.host: str = kw['host']
self.username: str = kw['username']
self.password: str = kw['password']
self.system_endpoint = kw.get('system_endpoint', '/Systems/1')
- log.logger.info(f"redfish system initialization, host: {self.host}, user: {self.username}")
self.client = RedFishClient(self.host, self.username, self.password)
+ self.log.logger.info(f"redfish system initialization, host: {self.host}, user: {self.username}")
self._system: Dict[str, Dict[str, Any]] = {}
self.run: bool = False
def _get_path(self, path: str) -> Dict:
result = self.client.get_path(path)
if result is None:
- log.logger.error(f"The client reported an error when getting path: {path}")
+ self.log.logger.error(f"The client reported an error when getting path: {path}")
raise RuntimeError(f"Could not get path: {path}")
return result
try:
result[member_id][to_snake_case(field)] = member_info[field]
except KeyError:
- log.logger.warning(f"Could not find field: {field} in member_info: {member_info}")
+ self.log.logger.warning(f"Could not find field: {field} in member_info: {member_info}")
return normalize_dict(result)
# - caching logic
try:
while self.run:
- log.logger.debug("waiting for a lock.")
+ self.log.logger.debug("waiting for a lock.")
self.lock.acquire()
- log.logger.debug("lock acquired.")
+ self.log.logger.debug("lock acquired.")
try:
self._update_system()
# the following calls can, in theory, be done in parallel (see the sketch after this method)
sleep(5)
finally:
self.lock.release()
- log.logger.debug("lock released.")
+ self.log.logger.debug("lock released.")
# Catching 'Exception' is probably not a good idea (devel only)
except Exception as e:
- log.logger.error(f"Error detected, logging out from redfish api.\n{e}")
+ self.log.logger.error(f"Error detected, logging out from redfish api.\n{e}")
self.client.logout()
raise
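
# The loop above notes that the _update_* calls can in theory run in parallel.
# A minimal sketch of that idea using the standard library, assuming the
# per-instance self.log from this change and that each update method writes to
# its own key of self._system (otherwise they would need extra locking);
# _update_system_parallel is a hypothetical name, not the actual method:
from concurrent.futures import ThreadPoolExecutor

def _update_system_parallel(self) -> None:
    updates = [
        self._update_metadata,
        self._update_memory,
        self._update_power,
        self._update_network,
        self._update_processors,
        self._update_storage,
    ]
    with ThreadPoolExecutor(max_workers=len(updates)) as pool:
        # submit every update and re-raise the first exception, if any,
        # so the error handling in the loop above still kicks in
        futures = [pool.submit(update) for update in updates]
        for future in futures:
            future.result()
    self.log.logger.debug("all update calls completed")
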
def flush(self) -> None:
- log.logger.info("Acquiring lock to flush data.")
+ self.log.logger.info("Acquiring lock to flush data.")
self.lock.acquire()
- log.logger.info("Lock acquired, flushing data.")
+ self.log.logger.info("Lock acquired, flushing data.")
self._system = {}
self.previous_data = {}
- log.logger.info("Data flushed.")
+ self.log.logger.info("Data flushed.")
self.data_ready = False
- log.logger.info("Data marked as not ready.")
+ self.log.logger.info("Data marked as not ready.")
self.lock.release()
- log.logger.info("Lock released.")
+ self.log.logger.info("Lock released.")
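
# flush() and the update loop above take and release self.lock by hand, with
# debug/info lines around every acquire and release. A possible alternative
# (not part of this change) is a small context manager so the release and the
# matching log lines can never be skipped; locked() is a hypothetical helper:
from contextlib import contextmanager
from typing import Iterator

@contextmanager
def locked(system: 'RedfishSystem') -> Iterator[None]:
    system.log.logger.debug("waiting for a lock.")
    system.lock.acquire()
    system.log.logger.debug("lock acquired.")
    try:
        yield
    finally:
        system.lock.release()
        system.log.logger.debug("lock released.")

# usage sketch: the body of flush() would then shrink to
#     with locked(self):
#         self._system = {}
#         self.previous_data = {}
#         self.data_ready = False
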
from .util import Logger
from typing import Dict, Any
-log = Logger(__name__)
-
class Reporter:
def __init__(self, system: Any, data: Dict[str, Any], observer_url: str) -> None:
self.observer_url = observer_url
self.finish = False
self.data = data
+ self.log = Logger(__name__)
+ self.log.logger.info(f'Observer url set to {self.observer_url}')
def stop(self) -> None:
self.finish = True
# in such a scenario we should probably just send the sub-parts
# that have changed, to minimize the traffic in
# dense clusters (see the sketch after this loop)
- log.logger.debug("waiting for a lock.")
+ self.log.logger.debug("waiting for a lock.")
self.system.lock.acquire()
- log.logger.debug("lock acquired.")
+ self.log.logger.debug("lock acquired.")
if self.system.data_ready:
- log.logger.info('data ready to be sent to the mgr.')
+ self.log.logger.info('data ready to be sent to the mgr.')
if self.system.get_system() != self.system.previous_data:
- log.logger.info('data has changed since last iteration.')
+ self.log.logger.info('data has changed since last iteration.')
self.data['data'] = self.system.get_system()
try:
# TODO: add a timeout parameter to the reporter in the config file
- log.logger.info(f"sending data to {self.observer_url}")
+ self.log.logger.info(f"sending data to {self.observer_url}")
r = requests.post(f"{self.observer_url}", json=self.data, timeout=5, verify=False)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as e:
- log.logger.error(f"The reporter couldn't send data to the mgr: {e}")
+ self.log.logger.error(f"The reporter couldn't send data to the mgr: {e}")
# Need to add a new parameter 'max_retries' to the reporter: if it can't
# send the data after x consecutive attempts, maybe the daemon should stop altogether
else:
self.system.previous_data = self.system.get_system()
else:
- log.logger.info('no diff, not sending data to the mgr.')
+ self.log.logger.info('no diff, not sending data to the mgr.')
self.system.lock.release()
- log.logger.debug("lock released.")
+ self.log.logger.debug("lock released.")
time.sleep(5)
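
# The comment at the top of this loop suggests sending only the sub-parts that
# changed, to limit traffic in dense clusters. A minimal sketch of that idea,
# assuming the system dict keeps one top-level key per section ('memory',
# 'storage', 'network', ...) as built by the _update_* methods above;
# changed_sections() is a hypothetical helper, not part of this change:
from typing import Any, Dict

def changed_sections(previous: Dict[str, Any], current: Dict[str, Any]) -> Dict[str, Any]:
    # keep only the top-level sections whose content differs from the previous
    # iteration; sections that disappeared entirely are ignored here
    return {key: value for key, value in current.items() if previous.get(key) != value}

# the reporter could then POST changed_sections(self.system.previous_data,
# self.system.get_system()) instead of the full payload, at the cost of the
# receiving end having to merge partial updates.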