mgr/volumes: Add human-readable flag to volume info command
author     Neeraj Pratap Singh <neesingh@redhat.com>
           Wed, 21 Sep 2022 06:28:29 +0000 (11:58 +0530)
committer  Neeraj Pratap Singh <neesingh@redhat.com>
           Wed, 12 Oct 2022 17:57:40 +0000 (23:27 +0530)
Add a human_readable flag to the fs volume info command so that
the used and available sizes are reported with size-appropriate
units instead of raw byte counts.

Fixes: https://tracker.ceph.com/issues/57620
Signed-off-by: Neeraj Pratap Singh <neesingh@redhat.com>
(cherry picked from commit 535b86ac94ae84cf651d5710ba3150ed9b30dbd4)
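
For context, a minimal standalone sketch of the kind of binary-unit
formatting this change enables; the helper name human_bytes and the
exact output format are illustrative assumptions, not the
mgr_util.format_bytes implementation the commit actually calls:

    # Hypothetical sketch of human-readable byte formatting; not the
    # actual mgr_util.format_bytes implementation.
    def human_bytes(n: int) -> str:
        units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
        value = float(n)
        for unit in units:
            if value < 1024 or unit == units[-1]:
                return f"{value:.1f}{unit}"
            value /= 1024

    print(human_bytes(73728))       # -> "72.0KiB"
    print(human_bytes(5368709120))  # -> "5.0GiB"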

src/pybind/mgr/volumes/fs/volume.py
src/pybind/mgr/volumes/module.py

index 335d76a809ab13db72128cd83eabbf75b10c589e..6d465febc5e3b1b573789ac013d0f6e33e514a8e 100644 (file)
@@ -2,6 +2,7 @@ import json
 import errno
 import logging
 import os
+import mgr_util
 from typing import TYPE_CHECKING
 
 import cephfs
@@ -152,6 +153,7 @@ class VolumeClient(CephfsClient["Module"]):
     def volume_info(self, **kwargs):
         ret     = None
         volname = kwargs['vol_name']
+        human_readable    = kwargs['human_readable']
 
         try:
             with open_volume(self, volname) as fs_handle:
@@ -163,7 +165,10 @@ class VolumeClient(CephfsClient["Module"]):
 
                     usedbytes = st['size']
                     vol_info_dict = get_pending_subvol_deletions_count(path)
-                    vol_info_dict['used_size'] = int(usedbytes)
+                    if human_readable:
+                        vol_info_dict['used_size'] = mgr_util.format_bytes(int(usedbytes), 5)
+                    else:
+                        vol_info_dict['used_size'] = int(usedbytes)
                 except cephfs.Error as e:
                     if e.args[0] == errno.ENOENT:
                         pass
@@ -178,10 +183,16 @@ class VolumeClient(CephfsClient["Module"]):
                         pool_type = "metadata"
                     else:
                         pool_type = "data"
-                    vol_info_dict["pools"][pool_type].append({
-                                    'name': pools[pool_id]['pool_name'],
-                                    'used': pool_stats[pool_id]['bytes_used'],
-                                    'avail': pool_stats[pool_id]['max_avail']})
+                    if human_readable:
+                        vol_info_dict["pools"][pool_type].append({
+                                        'name': pools[pool_id]['pool_name'],
+                                        'used': mgr_util.format_bytes(pool_stats[pool_id]['bytes_used'], 5),
+                                        'avail': mgr_util.format_bytes(pool_stats[pool_id]['max_avail'], 5)})
+                    else:
+                        vol_info_dict["pools"][pool_type].append({
+                                        'name': pools[pool_id]['pool_name'],
+                                        'used': pool_stats[pool_id]['bytes_used'],
+                                        'avail': pool_stats[pool_id]['max_avail']})
 
                 mon_addr_lst = []
                 mon_map_mons = self.mgr.get('mon_map')['mons']
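
Side note on the hunk above: the human_readable branch duplicates the
pool dictionary literal. A hedged sketch of how the two branches could
be collapsed with a single conversion helper (the name fmt is
hypothetical and not part of this commit):

    # Hypothetical simplification; behaviour-equivalent to the diff above.
    fmt = (lambda n: mgr_util.format_bytes(n, 5)) if human_readable else (lambda n: n)
    vol_info_dict["pools"][pool_type].append({
        'name': pools[pool_id]['pool_name'],
        'used': fmt(pool_stats[pool_id]['bytes_used']),
        'avail': fmt(pool_stats[pool_id]['max_avail'])})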
index 752dde33e3e6badbbffee67c03edd1ece9912f8d..b9c8e7893435934318aee3f8180f424bce813947 100644 (file)
@@ -70,7 +70,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
         },
         {
             'cmd': 'fs volume info '
-                   'name=vol_name,type=CephString ',
+                   'name=vol_name,type=CephString '
+                   'name=human_readable,type=CephBool,req=false ',
             'desc': "Get the information of a CephFS volume",
             'perm': 'r'
         },
@@ -555,7 +556,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
 
     @mgr_cmd_wrap
     def _cmd_fs_volume_info(self, inbuf, cmd):
-        return self.vc.volume_info(vol_name=cmd['vol_name'])
+        return self.vc.volume_info(vol_name=cmd['vol_name'],
+                                   human_readable=cmd.get('human_readable', False))
 
     @mgr_cmd_wrap
     def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
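
Since human_readable is registered as an optional CephBool, the handler
falls back to raw-byte output when the flag is not supplied. A short
sketch of that default handling with illustrative parsed-command
dictionaries (the values and the CLI spelling shown in the comment are
assumptions, not captured output):

    # Illustrative parsed-command dicts; not produced by a real cluster.
    # CLI (assumed form):  ceph fs volume info <vol_name> --human_readable
    cmd_plain = {'prefix': 'fs volume info', 'vol_name': 'vol1'}
    cmd_human = {'prefix': 'fs volume info', 'vol_name': 'vol1',
                 'human_readable': True}

    # Mirrors _cmd_fs_volume_info: the flag defaults to False when absent.
    assert cmd_plain.get('human_readable', False) is False
    assert cmd_human.get('human_readable', False) is True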