     return f'{s[0:sl-1]}+'
-class FSTop(object):
+class FSTopBase(object):
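+    """Base class holding the state and helpers used to compute per-client
+    metrics and dump them as JSON for the --dump/--dumpfs options."""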
+    def __init__(self):
+        self.last_time = time.time()    # timestamp of the previous sample
+        self.last_read_size = {}        # per-client cumulative read bytes at the previous sample
+        self.last_write_size = {}       # per-client cumulative write bytes at the previous sample
+        self.dump_json = {}             # per-filesystem client metrics accumulated for dumping
+
+    @staticmethod
+    def has_metric(metadata, metrics_key):
+        return metrics_key in metadata
+
+    @staticmethod
+    def has_metrics(metadata, metrics_keys):
+        for key in metrics_keys:
+            if not FSTopBase.has_metric(metadata, key):
+                return False
+        return True
+
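+    # Populate self.dump_json[fs] with one entry per client (keyed by the
+    # numeric part of the client id) holding the formatted metric columns.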
+    def __build_clients(self, fs):
+        fs_meta = self.dump_json.setdefault(fs, {})
+        fs_key = self.stats_json[GLOBAL_METRICS_KEY].get(fs, {})
+        clients = fs_key.keys()
+        # elapsed time since the previous sample; computed once so that every
+        # client in this dump shares the same interval for speed calculations
+        cur_time = time.time()
+        duration = cur_time - self.last_time
+        self.last_time = cur_time
+        for client_id in clients:
+            client_meta = self.stats_json[CLIENT_METADATA_KEY].get(fs, {}).get(client_id, {})
+            # relies on the client-id column appearing before the mount-root
+            # column in MAIN_WINDOW_TOP_LINE_ITEMS_START, since client_id_meta
+            # is created in the first branch
+            for item in MAIN_WINDOW_TOP_LINE_ITEMS_START[1:]:
+                if item == FS_TOP_MAIN_WINDOW_COL_CLIENT_ID:
+                    client_id_meta = fs_meta.setdefault(client_id.split('.')[1], {})
+                elif item == FS_TOP_MAIN_WINDOW_COL_MNT_ROOT:
+                    client_id_meta.update({item:
+                                           client_meta[CLIENT_METADATA_MOUNT_ROOT_KEY]})
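+            # the global counters list mirrors the ordering of each client's
+            # metric tuples, so a single running index (cidx) addresses both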
+            counters = [m.upper() for m in self.stats_json[GLOBAL_COUNTERS_KEY]]
+            metrics = fs_key.get(client_id, {})
+            cidx = 0
+            for item in counters:
+                if item in MAIN_WINDOW_TOP_LINE_METRICS_LEGACY:
+                    cidx += 1
+                    continue
+                m = metrics[cidx]
+                key = MGR_STATS_COUNTERS[cidx]
+                typ = MAIN_WINDOW_TOP_LINE_METRICS[key]
+                if item.lower() in client_meta.get(
+                        CLIENT_METADATA_VALID_METRICS_KEY, []):
+                    key_name = self.items(item)
+                    if typ == MetricType.METRIC_TYPE_PERCENTAGE:
+                        client_id_meta.update({f'{key_name}': calc_perc(m)})
+                    elif typ == MetricType.METRIC_TYPE_LATENCY:
+                        client_id_meta.update({f'{key_name}': calc_lat(m)})
+                    elif typ == MetricType.METRIC_TYPE_STDEV:
+                        client_id_meta.update({f'{key_name}': calc_stdev(m)})
+                    elif typ == MetricType.METRIC_TYPE_SIZE:
+                        client_id_meta.update({f'{key_name}': calc_size(m)})
+                        # average io sizes
+                        client_id_meta.update({f'{self.avg_items(item)}':
+                                               calc_avg_size(m)})
+                        # io speeds: delta of cumulative bytes since the
+                        # previous sample divided by the elapsed interval
+                        size = 0
+                        if key == "READ_IO_SIZES":
+                            if m[1] > 0:
+                                last_size = self.last_read_size.get(client_id, 0)
+                                size = m[1] - last_size
+                                self.last_read_size[client_id] = m[1]
+                        if key == "WRITE_IO_SIZES":
+                            if m[1] > 0:
+                                last_size = self.last_write_size.get(client_id, 0)
+                                size = m[1] - last_size
+                                self.last_write_size[client_id] = m[1]
+                        client_id_meta.update({f'{self.speed_items(item)}':
+                                               calc_speed(abs(size), duration)})
+                    else:
+                        # display 0th element from metric tuple
+                        client_id_meta.update({f'{key_name}': f'{m[0]}'})
+                else:
+                    client_id_meta.update({f'{self.items(item)}': "N/A"})
+                cidx += 1
+
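+            # trailing column: mount point, hostname and address are combined
+            # as "<mount point>@<hostname>/<ip>", or "N/A" if any of the three
+            # pieces of metadata is missing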
+            for item in MAIN_WINDOW_TOP_LINE_ITEMS_END:
+                if item == FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR:
+                    if FSTopBase.has_metrics(client_meta,
+                                             [CLIENT_METADATA_MOUNT_POINT_KEY,
+                                              CLIENT_METADATA_HOSTNAME_KEY,
+                                              CLIENT_METADATA_IP_KEY]):
+                        mount_point = f'{client_meta[CLIENT_METADATA_MOUNT_POINT_KEY]}'\
+                                      f'@{client_meta[CLIENT_METADATA_HOSTNAME_KEY]}/'\
+                                      f'{client_meta[CLIENT_METADATA_IP_KEY]}'
+                        client_id_meta.update({item: mount_point})
+                    else:
+                        client_id_meta.update({item: "N/A"})
+
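+    # One-shot entry point: query the perf stats once, build the per-client
+    # entries for a single filesystem (--dumpfs) or all of them (--dump), and
+    # print the accumulated dump_json as a single line of JSON on stdout.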
+    def dump_metrics_to_stdout(self, fs_name=None):
+        fs_list = self.get_fs_names()
+        if not fs_list:
+            sys.stdout.write("No filesystem available\n")
+        else:
+            self.stats_json = self.perf_stats_query()
+            if fs_name:  # --dumpfs
+                if fs_name in fs_list:
+                    self.__build_clients(fs_name)
+                else:
+                    sys.stdout.write(f"Filesystem {fs_name} not available\n")
+                    return
+            else:  # --dump
+                for fs in fs_list:
+                    self.__build_clients(fs)
+            sys.stdout.write(json.dumps(self.dump_json))
+            sys.stdout.write("\n")
+
+
+class FSTop(FSTopBase):
     def __init__(self, args):
+        super(FSTop, self).__init__()
         self.rados = None
         self.stdscr = None  # curses instance
-        self.current_screen = ""
+        self.active_screen = ""
         self.client_name = args.id
         self.cluster_name = args.cluster
         self.conffile = args.conffile