from rbd import RBD
try:
- from typing import Optional
+ from typing import Optional, Dict, Any, Set
except:
pass
for labelvalues, value in self.value.items():
if self.labelnames:
- labels = zip(self.labelnames, labelvalues)
- labels = ','.join('%s="%s"' % (k, v) for k, v in labels)
+ labels_list = zip(self.labelnames, labelvalues)
+ labels = ','.join('%s="%s"' % (k, v) for k, v in labels_list)
else:
labels = ''
if labels:
class MetricCollectionThread(threading.Thread):
- def __init__(self):
+ def __init__(self, module):
+ # type: (Module) -> None
+ self.mod = module
super(MetricCollectionThread, self).__init__(target=self.collect)
- @staticmethod
- def collect():
- inst = _global_instance
- inst.log.info('starting metric collection thread')
+ def collect(self):
+ self.mod.log.info('starting metric collection thread')
while True:
- if inst.have_mon_connection():
+ self.mod.log.debug('collecting cache in thread')
+ if self.mod.have_mon_connection():
start_time = time.time()
- data = inst.collect()
+ data = self.mod.collect()
duration = time.time() - start_time
+
+ self.mod.log.debug('collecting cache in thread done')
- sleep_time = inst.scrape_interval - duration
+ sleep_time = self.mod.scrape_interval - duration
if sleep_time < 0:
- inst.log.warning(
+ self.mod.log.warning(
'Collecting data took more time than configured scrape interval. '
'This possibly results in stale data. Please check the '
'`stale_cache_strategy` configuration option. '
'Collecting data took {:.2f} seconds but scrape interval is configured '
'to be {:.0f} seconds.'.format(
duration,
- inst.scrape_interval,
+ self.mod.scrape_interval,
)
)
sleep_time = 0
- with inst.collect_lock:
- inst.collect_cache = data
- inst.collect_time = duration
+ with self.mod.collect_lock:
+ self.mod.collect_cache = data
+ self.mod.collect_time = duration
time.sleep(sleep_time)
else:
- inst.log.error('No MON connection')
- time.sleep(inst.scrape_interval)
+ self.mod.log.error('No MON connection')
+ time.sleep(self.mod.scrape_interval)
class Module(MgrModule):
self.metrics = self._setup_static_metrics()
self.shutdown_event = threading.Event()
self.collect_lock = threading.Lock()
- self.collect_time = 0
+ self.collect_time = 0.0
self.scrape_interval = 15.0
self.stale_cache_strategy = self.STALE_CACHE_FAIL
self.collect_cache = None
'read_latency': {'type': self.PERFCOUNTER_LONGRUNAVG,
'desc': 'RBD image reads latency (msec)'},
},
- }
+ } # type: Dict[str, Any]
global _global_instance
_global_instance = self
- MetricCollectionThread().start()
+ MetricCollectionThread(_global_instance).start()
def _setup_static_metrics(self):
metrics = {}
else:
pool_keys.append((pool_name, namespace_name))
- pools = {}
+ pools = {} # type: Dict[str, Set[str]]
for pool_key in pool_keys:
pool_name = pool_key[0]
namespace_name = pool_key[1]
pools[pool_name].add(namespace_name)
rbd_stats_pools = {}
- for pool_id in list(self.rbd_stats['pools']):
+ for pool_id in list(self.rbd_stats['pools'].keys()):
name = self.rbd_stats['pools'][pool_id]['name']
if name not in pools:
del self.rbd_stats['pools'][pool_id]
# TODO use get_config_prefix or get_config here once
# https://github.com/ceph/ceph/pull/20458 is merged
result = CommandResult("")
+ assert isinstance(_global_instance, Module)
_global_instance.send_command(
result, "mon", '',
json.dumps({
@cherrypy.expose
def metrics(self):
# Lock the function execution
+ assert isinstance(_global_instance, Module)
with _global_instance.collect_lock:
return self._metrics(_global_instance)
@staticmethod
def _metrics(instance):
+ # type: (Module) -> Any
# Return cached data if available
if not instance.collect_cache:
raise cherrypy.HTTPError(503, 'No cached data available yet')
def respond():
+ assert isinstance(instance, Module)
cherrypy.response.headers['Content-Type'] = 'text/plain'
return instance.collect_cache