git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
cephfs-top: dump values to stdout
author: Jos Collin <jcollin@redhat.com>
Mon, 21 Nov 2022 09:35:01 +0000 (15:05 +0530)
committer: Jos Collin <jcollin@redhat.com>
Thu, 16 Feb 2023 09:43:19 +0000 (15:13 +0530)
Fixes: https://tracker.ceph.com/issues/57014
Signed-off-by: Jos Collin <jcollin@redhat.com>
src/tools/cephfs/top/cephfs-top

index cdcb925b0b74fac7ec461c83ba97b17c04592521..5461b9d759531866dced62a3fa4a3b527df8b2fe 100755 (executable)
@@ -100,10 +100,6 @@ CLIENT_METADATA_VALID_METRICS_KEY = "valid_metrics"
 GLOBAL_METRICS_KEY = "global_metrics"
 GLOBAL_COUNTERS_KEY = "global_counters"
 
-last_time = time.time()
-last_read_size = {}
-last_write_size = {}
-
 fs_list = []
 # store the current states of cephfs-top
 # last_fs    : last filesystem visited
@@ -156,8 +152,119 @@ def wrap(s, sl):
     return f'{s[0:sl-1]}+'
 
 
-class FSTop(object):
+class FSTopBase(object):
+    def __init__(self):
+        self.last_time = time.time()
+        self.last_read_size = {}
+        self.last_write_size = {}
+        self.dump_json = {}
+
+    @staticmethod
+    def has_metric(metadata, metrics_key):
+        return metrics_key in metadata
+
+    @staticmethod
+    def has_metrics(metadata, metrics_keys):
+        for key in metrics_keys:
+            if not FSTopBase.has_metric(metadata, key):
+                return False
+        return True
+
+    def __build_clients(self, fs):
+        fs_meta = self.dump_json.setdefault(fs, {})
+        fs_key = self.stats_json[GLOBAL_METRICS_KEY].get(fs, {})
+        clients = fs_key.keys()
+        for client_id in clients:
+            cur_time = time.time()
+            duration = cur_time - self.last_time
+            self.last_time = cur_time
+            client_meta = self.stats_json[CLIENT_METADATA_KEY].get(fs, {}).get(client_id, {})
+            for item in MAIN_WINDOW_TOP_LINE_ITEMS_START[1:]:
+                if item == FS_TOP_MAIN_WINDOW_COL_CLIENT_ID:
+                    client_id_meta = fs_meta.setdefault(client_id.split('.')[1], {})
+                elif item == FS_TOP_MAIN_WINDOW_COL_MNT_ROOT:
+                    client_id_meta.update({item:
+                                           client_meta[CLIENT_METADATA_MOUNT_ROOT_KEY]})
+            counters = [m.upper() for m in self.stats_json[GLOBAL_COUNTERS_KEY]]
+            metrics = fs_key.get(client_id, {})
+            cidx = 0
+            for item in counters:
+                if item in MAIN_WINDOW_TOP_LINE_METRICS_LEGACY:
+                    cidx += 1
+                    continue
+                m = metrics[cidx]
+                key = MGR_STATS_COUNTERS[cidx]
+                typ = MAIN_WINDOW_TOP_LINE_METRICS[key]
+                if item.lower() in client_meta.get(
+                        CLIENT_METADATA_VALID_METRICS_KEY, []):
+                    key_name = self.items(item)
+                    if typ == MetricType.METRIC_TYPE_PERCENTAGE:
+                        client_id_meta.update({f'{key_name}': calc_perc(m)})
+                    elif typ == MetricType.METRIC_TYPE_LATENCY:
+                        client_id_meta.update({f'{key_name}': calc_lat(m)})
+                    elif typ == MetricType.METRIC_TYPE_STDEV:
+                        client_id_meta.update({f'{key_name}': calc_stdev(m)})
+                    elif typ == MetricType.METRIC_TYPE_SIZE:
+                        client_id_meta.update({f'{key_name}': calc_size(m)})
+                        # average io sizes
+                        client_id_meta.update({f'{self.avg_items(item)}':
+                                               calc_avg_size(m)})
+                        # io speeds
+                        size = 0
+                        if key == "READ_IO_SIZES":
+                            if m[1] > 0:
+                                last_size = self.last_read_size.get(client_id, 0)
+                                size = m[1] - last_size
+                                self.last_read_size[client_id] = m[1]
+                        if key == "WRITE_IO_SIZES":
+                            if m[1] > 0:
+                                last_size = self.last_write_size.get(client_id, 0)
+                                size = m[1] - last_size
+                                self.last_write_size[client_id] = m[1]
+                        client_id_meta.update({f'{self.speed_items(item)}':
+                                               calc_speed(abs(size), duration)})
+                    else:
+                        # display 0th element from metric tuple
+                        client_id_meta.update({f'{key_name}': f'{m[0]}'})
+                else:
+                    client_id_meta.update({f'{self.items(item)}': "N/A"})
+                cidx += 1
+
+            for item in MAIN_WINDOW_TOP_LINE_ITEMS_END:
+                if item == FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR:
+                    if FSTopBase.has_metrics(client_meta,
+                                             [CLIENT_METADATA_MOUNT_POINT_KEY,
+                                              CLIENT_METADATA_HOSTNAME_KEY,
+                                              CLIENT_METADATA_IP_KEY]):
+                        mount_point = f'{client_meta[CLIENT_METADATA_MOUNT_POINT_KEY]}'\
+                            f'@{client_meta[CLIENT_METADATA_HOSTNAME_KEY]}/'\
+                            f'{client_meta[CLIENT_METADATA_IP_KEY]}'
+                        client_id_meta.update({item: mount_point})
+                    else:
+                        client_id_meta.update({item: "N/A"})
+
+    def dump_metrics_to_stdout(self, fs_name=None):
+        fs_list = self.get_fs_names()
+        if not fs_list:
+            sys.stdout.write("No filesystem available\n")
+        else:
+            self.stats_json = self.perf_stats_query()
+            if fs_name:  # --dumpfs
+                if fs_name in fs_list:
+                    self.__build_clients(fs_name)
+                else:
+                    sys.stdout.write(f"Filesystem {fs_name} not available\n")
+                    return
+            else:  # --dump
+                for fs in fs_list:
+                    self.__build_clients(fs)
+            sys.stdout.write(json.dumps(self.dump_json))
+            sys.stdout.write("\n")
+
+
+class FSTop(FSTopBase):
     def __init__(self, args):
+        super(FSTop, self).__init__()
         self.rados = None
         self.stdscr = None  # curses instance
         self.active_screen = ""
@@ -607,17 +714,6 @@ class FSTop(object):
             # return empty string for none type
             return ''
 
-    @staticmethod
-    def has_metric(metadata, metrics_key):
-        return metrics_key in metadata
-
-    @staticmethod
-    def has_metrics(metadata, metrics_keys):
-        for key in metrics_keys:
-            if not FSTop.has_metric(metadata, key):
-                return False
-        return True
-
     def create_table_header(self):  # formerly named as top_line
         heading = []
         for item in MAIN_WINDOW_TOP_LINE_ITEMS_START:
@@ -645,12 +741,11 @@ class FSTop(object):
 
     def create_client(self, fs_name, client_id, metrics, counters,
                       client_meta, y_coord):
-        global last_time
         metrics_dict.setdefault(fs_name, {})
         metrics_dict[fs_name].setdefault(client_id, {})
         cur_time = time.time()
-        duration = cur_time - last_time
-        last_time = cur_time
+        duration = cur_time - self.last_time
+        self.last_time = cur_time
         xp = 0  # xp is incremented after each addstr to position the next incoming metrics.
         for item in MAIN_WINDOW_TOP_LINE_ITEMS_START:  # note: the first item is ITEMS_PAD
             hlen = len(item) + ITEMS_PAD_LEN
@@ -714,16 +809,14 @@ class FSTop(object):
                     size = 0
                     if key == "READ_IO_SIZES":
                         if m[1] > 0:
-                            global last_read_size
-                            last_size = last_read_size.get(client_id, 0)
+                            last_size = self.last_read_size.get(client_id, 0)
                             size = m[1] - last_size
-                            last_read_size[client_id] = m[1]
+                            self.last_read_size[client_id] = m[1]
                     if key == "WRITE_IO_SIZES":
                         if m[1] > 0:
-                            global last_write_size
-                            last_size = last_write_size.get(client_id, 0)
+                            last_size = self.last_write_size.get(client_id, 0)
                             size = m[1] - last_size
-                            last_write_size[client_id] = m[1]
+                            self.last_write_size[client_id] = m[1]
                     speed = calc_speed(abs(size), duration)
                     metrics_dict[fs_name][client_id][self.speed_items(key)] = speed
                     self.fsstats.addstr(y_coord, xp,
@@ -1112,6 +1205,10 @@ if __name__ == '__main__':
                         type=float_greater_than,
                         help='Refresh interval in seconds '
                         f'(default: {DEFAULT_REFRESH_INTERVAL})')
+    parser.add_argument('--dump', dest='dump', action='store_true',
+                        help='Dump the metrics to stdout')
+    parser.add_argument('--dumpfs', action='append',
+                        help='Dump the metrics of the given fs to stdout')
 
     args = parser.parse_args()
     err = False
@@ -1121,6 +1218,10 @@ if __name__ == '__main__':
         if args.selftest:
             ft.selftest()
             sys.stdout.write("selftest ok\n")
+        elif args.dump:
+            ft.dump_metrics_to_stdout()
+        elif args.dumpfs:
+            ft.dump_metrics_to_stdout(args.dumpfs[0])
         else:
             curses.wrapper(ft.setup_curses)
     except FSTopException as fst: