From: Redouane Kachach
Date: Fri, 4 Feb 2022 11:22:50 +0000 (+0100)
Subject: mgr/cephadm: Adding support for natural sorting
X-Git-Tag: v18.0.0~1188^2~1
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=28314cebef328a1e822dbc4c348995433af25fc2;p=ceph-ci.git

mgr/cephadm: Adding support for natural sorting

Fixes: https://tracker.ceph.com/issues/54026
Signed-off-by: Redouane Kachach
---

diff --git a/src/pybind/mgr/orchestrator/module.py b/src/pybind/mgr/orchestrator/module.py
index 69bdee01339..04d57fbe1f8 100644
--- a/src/pybind/mgr/orchestrator/module.py
+++ b/src/pybind/mgr/orchestrator/module.py
@@ -7,6 +7,7 @@ import datetime
 import yaml
 
 from prettytable import PrettyTable
+from natsort import natsorted
 
 from ceph.deployment.inventory import Device
 from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, OSDMethod
@@ -400,7 +401,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
             table.align = 'l'
             table.left_padding_width = 0
             table.right_padding_width = 2
-            for host in sorted(hosts, key=lambda h: h.hostname):
+            for host in natsorted(hosts, key=lambda h: h.hostname):
                 table.add_row((host.hostname, host.addr, ' '.join(
                     host.labels), host.status.capitalize()))
             output = table.get_string()
@@ -435,8 +436,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
         raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @_cli_write_command(
-        'orch host maintenance enter')
+    @_cli_write_command('orch host maintenance enter')
     def _host_maintenance_enter(self, hostname: str, force: bool = False) -> HandleCommandResult:
         """
         Prepare a host for maintenance by shutting down and disabling all Ceph daemons (cephadm only)
@@ -446,8 +446,7 @@
 
         return HandleCommandResult(stdout=completion.result_str())
 
-    @_cli_write_command(
-        'orch host maintenance exit')
+    @_cli_write_command('orch host maintenance exit')
     def _host_maintenance_exit(self, hostname: str) -> HandleCommandResult:
         """
         Return a host from maintenance, restarting all Ceph daemons (cephadm only)
@@ -509,7 +508,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
             table.left_padding_width = 0
             table.right_padding_width = 2
             now = datetime_now()
-            for host_ in sorted(inv_hosts, key=lambda h: h.name):  # type: InventoryHost
+            for host_ in natsorted(inv_hosts, key=lambda h: h.name):  # type: InventoryHost
                 for d in sorted(host_.devices.devices, key=lambda d: d.path):  # type: Device
 
                     led_ident = 'N/A'
@@ -687,7 +686,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
             table._align['MEM LIM'] = 'r'
             table.left_padding_width = 0
             table.right_padding_width = 2
-            for s in sorted(daemons, key=lambda s: s.name()):
+            for s in natsorted(daemons, key=lambda d: d.name()):
                 if s.status_desc:
                     status = s.status_desc
                 else:
diff --git a/src/pybind/mgr/orchestrator/tests/test_orchestrator.py b/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
index 6c847b86881..c6b41251005 100644
--- a/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
+++ b/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
@@ -5,6 +5,8 @@ import textwrap
 import pytest
 import yaml
 
+from ceph.deployment.hostspec import HostSpec
+from ceph.deployment.inventory import Devices, Device
 from ceph.deployment.service_spec import ServiceSpec
 from ceph.deployment import inventory
 from ceph.utils import datetime_now
@@ -176,6 +178,65 @@ def test_orch_ls(_describe_service):
     assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
 
 
+dlist = OrchResult([DaemonDescription(daemon_type="osd", daemon_id="1"), DaemonDescription(
+    daemon_type="osd", daemon_id="10"), DaemonDescription(daemon_type="osd", daemon_id="2")])
+
+
+@mock.patch("orchestrator.OrchestratorCli.list_daemons", return_value=dlist)
+def test_orch_ps(_describe_service):
+
+    # Ensure natural sorting on daemon names (osd.1, osd.2, osd.10)
+    cmd = {
+        'prefix': 'orch ps'
+    }
+    m = OrchestratorCli('orchestrator', 0, 0)
+    r = m._handle_command(None, cmd)
+    out = 'NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID \n'\
+          'osd.1 unknown - - - - \n'\
+          'osd.2 unknown - - - - \n'\
+          'osd.10 unknown - - - - '
+    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
+
+
+hlist = OrchResult([HostSpec("ceph-node-1"), HostSpec("ceph-node-2"), HostSpec("ceph-node-10")])
+
+
+@mock.patch("orchestrator.OrchestratorCli.get_hosts", return_value=hlist)
+def test_orch_host_ls(_describe_service):
+
+    # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
+    cmd = {
+        'prefix': 'orch host ls'
+    }
+    m = OrchestratorCli('orchestrator', 0, 0)
+    r = m._handle_command(None, cmd)
+    out = 'HOST ADDR LABELS STATUS \n'\
+          'ceph-node-1 ceph-node-1 \n'\
+          'ceph-node-2 ceph-node-2 \n'\
+          'ceph-node-10 ceph-node-10 \n'\
+          '3 hosts in cluster'
+    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
+
+
+def test_orch_device_ls():
+    devices = Devices([Device("/dev/vdb", available=True)])
+    ilist = OrchResult([InventoryHost("ceph-node-1", devices=devices), InventoryHost("ceph-node-2",
+                        devices=devices), InventoryHost("ceph-node-10", devices=devices)])
+
+    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
+        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
+        cmd = {
+            'prefix': 'orch device ls'
+        }
+        m = OrchestratorCli('orchestrator', 0, 0)
+        r = m._handle_command(None, cmd)
+        out = 'HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS \n'\
+              'ceph-node-1 /dev/vdb unknown None 0 Yes 0s ago \n'\
+              'ceph-node-2 /dev/vdb unknown None 0 Yes 0s ago \n'\
+              'ceph-node-10 /dev/vdb unknown None 0 Yes 0s ago '
+        assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
+
+
 def test_preview_table_osd_smoke():
     data = [
         {
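
For reference, a minimal illustrative sketch (not part of the patch) of the ordering difference the new tests assert; it assumes only the natsort package that module.py now imports:

    # Why plain sorted() is not enough for daemon/host names:
    # lexicographically, "osd.10" sorts before "osd.2".
    from natsort import natsorted

    names = ["osd.1", "osd.10", "osd.2"]
    print(sorted(names))     # ['osd.1', 'osd.10', 'osd.2']  -- string comparison: '1' < '2'
    print(natsorted(names))  # ['osd.1', 'osd.2', 'osd.10']  -- numeric parts compared as numbers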