from system import System
from redfish_client import RedFishClient
-from threading import Thread
+from threading import Thread, Lock
from time import sleep
from util import logger
self.run = False
self.thread = None
# Shared state consumed by the updater thread: initialize it BEFORE
# start_client() so the thread can never observe a half-built object.
# (Previously start_client() was called first, leaving a window where
# the thread could hit AttributeError on data_ready/previous_data/lock.)
self.data_ready = False    # set True once a full snapshot has been cached
self.previous_data = {}    # last snapshot sent to the observer, for diffing
self.lock = Lock()         # guards the cached system data across threads
self.start_client()
def start_client(self):
    # Initialize the Redfish client session for this host.
    # NOTE(review): only the opening log line is visible in this view;
    # the remainder of the method lies outside this chunk.
    log.info(f"redfish system initialization, host: {self.host}, user: {self.username}")
# TODO: add caching logic to this loop
try:
    while self.run:
        log.debug("waiting for a lock.")
        # Idiomatic lock handling: the context manager guarantees release
        # even if an _update_* call raises (replaces manual acquire/release
        # with try/finally).
        with self.lock:
            log.debug("lock acquired.")
            self._update_system()
            # following calls in theory can be done in parallel
            self._update_metadata()
            self._update_memory()
            self._update_power()
            self._update_network()
            self._update_processors()
            self._update_storage()
            # Signal the reporter thread that a complete snapshot exists.
            self.data_ready = True
        log.debug("lock released.")
        # Sleep OUTSIDE the critical section. Sleeping while holding the
        # lock kept it held almost 100% of the time, starving the reporter
        # thread, which could only grab it in the narrow window between
        # release and re-acquire.
        sleep(5)
# Catching 'Exception' is probably not a good idea (devel only)
except Exception as e:
    log.error(f"Error detected, logging out from redfish api.\n{e}")
# TODO: in a dense-cluster scenario we should send only the sub-parts
# that have changed, to minimize traffic to the mgr.
if self.system.data_ready:
    log.debug("waiting for a lock.")
    # Acquire via context manager: the previous code acquired INSIDE a
    # try whose finally always released — if acquire() itself raised,
    # release() would be called on an unheld lock, masking the real
    # error with a RuntimeError.
    with self.system.lock:
        log.debug("lock acquired.")
        # Snapshot once instead of calling get_system() three times.
        data = self.system.get_system()
        if data != self.system.previous_data:
            log.info('data has changed since last iteration.')
            self.system.previous_data = data
            requests.post(f"{self.observer_url}/fake_endpoint", json=data)
        else:
            log.info('no diff, not sending data to the mgr.')
    log.debug("lock released.")
time.sleep(20)