From: Sebastian Wagner
Date: Wed, 26 Aug 2020 09:45:56 +0000 (+0200)
Subject: mgr/cephadm: PEP8tify some tests
X-Git-Tag: v17.0.0~1361^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=a385c3cc28188f5a4c85a113086ab11f9cace67c;p=ceph.git

mgr/cephadm: PEP8tify some tests

Signed-off-by: Sebastian Wagner
---

diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py
index ca15d4e3976a3..9f7864ccebed4 100644
--- a/src/pybind/mgr/cephadm/tests/fixtures.py
+++ b/src/pybind/mgr/cephadm/tests/fixtures.py
@@ -46,7 +46,7 @@ def with_cephadm_module(module_options=None, store=None):
             mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
             mock.patch("cephadm.module.CephadmOrchestrator.remote"):
 
-        m = CephadmOrchestrator.__new__ (CephadmOrchestrator)
+        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
         if module_options is not None:
             for k, v in module_options.items():
                 m._ceph_set_module_option('cephadm', k, v)
@@ -114,7 +114,7 @@ def wait(m, c):
 
 
 @contextmanager
-def with_host(m:CephadmOrchestrator, name):
+def with_host(m: CephadmOrchestrator, name):
     # type: (CephadmOrchestrator, str) -> None
     wait(m, m.add_host(HostSpec(hostname=name)))
     yield
@@ -143,4 +143,4 @@ def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth, h
 
     yield
 
-    assert_rm_service(cephadm_module, spec.service_name())
\ No newline at end of file
+    assert_rm_service(cephadm_module, spec.service_name())
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index d1072d4f250b5..fcff289609bd8 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -81,7 +81,7 @@ class TestCephadm(object):
 
             # Be careful with backward compatibility when changing things here:
             assert json.loads(cephadm_module.get_store('inventory')) == \
-                   {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}
+                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}
 
             with with_host(cephadm_module, 'second'):
                 assert wait(cephadm_module, cephadm_module.get_hosts()) == [
@@ -93,7 +93,7 @@ class TestCephadm(object):
         assert wait(cephadm_module, cephadm_module.get_hosts()) == []
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_service_ls(self, cephadm_module):
         with with_host(cephadm_module, 'test'):
             c = cephadm_module.list_daemons(refresh=True)
@@ -177,7 +177,7 @@ class TestCephadm(object):
             assert wait(cephadm_module, c)[0].name() == 'rgw.myrgw.foobar'
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
         cephadm_module.service_cache_timeout = 10
         with with_host(cephadm_module, 'test'):
@@ -200,7 +200,7 @@ class TestCephadm(object):
                 cephadm_module._check_daemons()
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
         cephadm_module.service_cache_timeout = 10
         with with_host(cephadm_module, 'test'):
@@ -218,7 +218,8 @@ class TestCephadm(object):
 
                 cephadm_module._check_daemons()
 
-                evs = [e.message for e in cephadm_module.events.get_for_daemon(f'rgw.{daemon_id}')]
+                evs = [e.message for e in cephadm_module.events.get_for_daemon(
+                    f'rgw.{daemon_id}')]
 
                 assert 'myerror' in ''.join(evs)
 
@@ -240,8 +241,8 @@ class TestCephadm(object):
 
             with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
                 cephadm_module._check_daemons()
-                _mon_cmd.assert_any_call({'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})
-
+                _mon_cmd.assert_any_call(
+                    {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
     def test_mon_add(self, cephadm_module):
@@ -348,16 +349,18 @@ class TestCephadm(object):
             _run_cephadm.assert_any_call(
                 'test', 'osd', 'ceph-volume',
-                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
+                ['--config-json', '-', '--', 'lvm', 'prepare',
+                 '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                 env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
                 stdin='{"config": "", "keyring": ""}')
-            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
-
+            _run_cephadm.assert_called_with(
+                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     @mock.patch("cephadm.module.SpecStore.save")
     def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
         with with_host(cephadm_module, 'test'):
-            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
+            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'},
+                         'service_id': 'foo', 'data_devices': {'all': True}}
             spec = ServiceSpec.from_json(json_spec)
             assert isinstance(spec, DriveGroupSpec)
             c = cephadm_module.apply([spec])
@@ -367,7 +370,8 @@ class TestCephadm(object):
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_create_osds(self, cephadm_module):
         with with_host(cephadm_module, 'test'):
-            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
+            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+                                data_devices=DeviceSelection(paths=['']))
             c = cephadm_module.create_osds(dg)
             out = wait(cephadm_module, c)
             assert out == "Created no osd(s) on host test; already created?"
@@ -375,7 +379,8 @@ class TestCephadm(object):
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_prepare_drivegroup(self, cephadm_module):
         with with_host(cephadm_module, 'test'):
-            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
+            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+                                data_devices=DeviceSelection(paths=['']))
             out = cephadm_module.osd_service.prepare_drivegroup(dg)
             assert len(out) == 1
             f1 = out[0]
@@ -388,17 +393,20 @@ class TestCephadm(object):
             # no preview and only one disk, prepare is used due the hack that is in place.
             (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
             # no preview and multiple disks, uses batch
-            (['/dev/sda', '/dev/sdb'], False, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
+            (['/dev/sda', '/dev/sdb'], False,
+             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
             # preview and only one disk needs to use batch again to generate the preview
             (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
             # preview and multiple disks work the same
-            (['/dev/sda', '/dev/sdb'], True, "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
+            (['/dev/sda', '/dev/sdb'], True,
+             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
         ]
     )
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
         with with_host(cephadm_module, 'test'):
-            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
+            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
+                host_pattern='test'), data_devices=DeviceSelection(paths=devices))
             ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
             preview = preview
             out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
@@ -444,17 +452,19 @@ class TestCephadm(object):
            assert out == []
 
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_rgw_update(self, cephadm_module):
         with with_host(cephadm_module, 'host1'):
             with with_host(cephadm_module, 'host2'):
                 ps = PlacementSpec(hosts=['host1'], count=1)
-                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
+                c = cephadm_module.add_rgw(
+                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                 [out] = wait(cephadm_module, c)
                 match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")
 
                 ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
-                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
+                r = cephadm_module._apply_service(
+                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                 assert r
 
         assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
@@ -495,7 +505,7 @@ class TestCephadm(object):
         ]
     )
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
         with with_host(cephadm_module, 'test'):
             with with_daemon(cephadm_module, spec, meth, 'test'):
@@ -507,10 +517,10 @@ class TestCephadm(object):
         with with_host(cephadm_module, 'test'):
             ps = PlacementSpec(hosts=['test'], count=1)
             spec = NFSServiceSpec(
-                    service_id='name',
-                    pool='pool',
-                    namespace='namespace',
-                    placement=ps)
+                service_id='name',
+                pool='pool',
+                namespace='namespace',
+                placement=ps)
             c = cephadm_module.add_nfs(spec)
             [out] = wait(cephadm_module, c)
             match_glob(out, "Deployed nfs.name.* on host 'test'")
@@ -528,11 +538,11 @@ class TestCephadm(object):
         with with_host(cephadm_module, 'test'):
             ps = PlacementSpec(hosts=['test'], count=1)
             spec = IscsiServiceSpec(
-                    service_id='name',
-                    pool='pool',
-                    api_user='user',
-                    api_password='password',
-                    placement=ps)
+                service_id='name',
+                pool='pool',
+                api_user='user',
+                api_password='password',
+                placement=ps)
             c = cephadm_module.add_iscsi(spec)
             [out] = wait(cephadm_module, c)
             match_glob(out, "Deployed iscsi.name.* on host 'test'")
@@ -596,7 +606,7 @@ class TestCephadm(object):
         ]
     )
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
-    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
     def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module: CephadmOrchestrator):
         with with_host(cephadm_module, 'test'):
             with with_service(cephadm_module, spec, meth, 'test'):
@@ -632,7 +642,6 @@ class TestCephadm(object):
         assert_rm_daemon(cephadm_module, spec.service_name(), 'host1')  # verifies ok-to-stop
         assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')
 
-
     @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
     @mock.patch("remoto.process.check")
     def test_offline(self, _check, _get_connection, cephadm_module):
@@ -731,7 +740,6 @@ class TestCephadm(object):
             cephadm_module.cache.load()
             assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
 
-
     def test_etc_ceph_init(self):
         with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
             assert m.manage_etc_ceph_ceph_conf is True
@@ -750,32 +758,33 @@ class TestCephadm(object):
             assert out == 'registry login scheduled'
             assert err == ''
             check_registry_credentials('test-url', 'test-user', 'test-password')
-
+
             # test bad login attempt with invalid args
             code, out, err = cephadm_module.registry_login('bad-args')
             assert err == ("Invalid arguments. Please provide arguments <url> <username> <password> "
                            "or -i <login credentials json file>")
             check_registry_credentials('test-url', 'test-user', 'test-password')
-
+
             # test bad login using invalid json file
-            code, out, err = cephadm_module.registry_login(None, None, None, '{"bad-json": "bad-json"}')
+            code, out, err = cephadm_module.registry_login(
+                None, None, None, '{"bad-json": "bad-json"}')
            assert err == ("json provided for custom registry login did not include all necessary fields. "
-                            "Please setup json file as\n"
-                            "{\n"
-                            " \"url\": \"REGISTRY_URL\",\n"
-                            " \"username\": \"REGISTRY_USERNAME\",\n"
-                            " \"password\": \"REGISTRY_PASSWORD\"\n"
-                            "}\n")
+                           "Please setup json file as\n"
+                           "{\n"
+                           " \"url\": \"REGISTRY_URL\",\n"
+                           " \"username\": \"REGISTRY_USERNAME\",\n"
+                           " \"password\": \"REGISTRY_PASSWORD\"\n"
+                           "}\n")
             check_registry_credentials('test-url', 'test-user', 'test-password')
-
+
            # test good login using valid json file
             good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" + "json-user" + "\", "
-                          " \"password\": \"" + "json-pass" + "\"}")
+                         " \"password\": \"" + "json-pass" + "\"}")
             code, out, err = cephadm_module.registry_login(None, None, None, good_json)
             assert out == 'registry login scheduled'
             assert err == ''
             check_registry_credentials('json-url', 'json-user', 'json-pass')
-
+
             # test bad login where args are valid but login command fails
             _run_cephadm.return_value = '{}', 'error', 1
             code, out, err = cephadm_module.registry_login('fail-url', 'fail-user', 'fail-password')
diff --git a/src/pybind/mgr/cephadm/tests/test_completion.py b/src/pybind/mgr/cephadm/tests/test_completion.py
index 2f7956667fc8a..b43cd6a860d07 100644
--- a/src/pybind/mgr/cephadm/tests/test_completion.py
+++ b/src/pybind/mgr/cephadm/tests/test_completion.py
@@ -45,7 +45,6 @@ class TestCompletion(object):
                 return str(args)
             assert run_forall(input) == expected
 
-
     @pytest.mark.parametrize("input,expected", [
         ([], []),
         ([1], ["(1,)"]),
diff --git a/src/pybind/mgr/cephadm/tests/test_osd_removal.py b/src/pybind/mgr/cephadm/tests/test_osd_removal.py
index 0b4b0cd506cf8..55179da363b1c 100644
--- a/src/pybind/mgr/cephadm/tests/test_osd_removal.py
+++ b/src/pybind/mgr/cephadm/tests/test_osd_removal.py
@@ -10,6 +10,7 @@ class MockOSD:
     def __init__(self, osd_id):
         self.osd_id = osd_id
 
+
 class TestOSDRemoval:
 
     @pytest.mark.parametrize(
@@ -62,11 +63,13 @@ class TestOSDRemoval:
 
     def test_destroy_osd(self, rm_util):
         rm_util.destroy_osd(1)
-        rm_util._run_mon_cmd.assert_called_with({'prefix': 'osd destroy-actual', 'id': 1, 'yes_i_really_mean_it': True})
+        rm_util._run_mon_cmd.assert_called_with(
+            {'prefix': 'osd destroy-actual', 'id': 1, 'yes_i_really_mean_it': True})
 
     def test_purge_osd(self, rm_util):
         rm_util.purge_osd(1)
-        rm_util._run_mon_cmd.assert_called_with({'prefix': 'osd purge-actual', 'id': 1, 'yes_i_really_mean_it': True})
+        rm_util._run_mon_cmd.assert_called_with(
+            {'prefix': 'osd purge-actual', 'id': 1, 'yes_i_really_mean_it': True})
 
 
 class TestOSD:
diff --git a/src/pybind/mgr/cephadm/tests/test_upgrade.py b/src/pybind/mgr/cephadm/tests/test_upgrade.py
index 44e340ce29173..51dcc8e80fba4 100644
--- a/src/pybind/mgr/cephadm/tests/test_upgrade.py
+++ b/src/pybind/mgr/cephadm/tests/test_upgrade.py
@@ -5,16 +5,19 @@ from ceph.deployment.service_spec import ServiceSpec
 from cephadm import CephadmOrchestrator
 from .fixtures import _run_cephadm, wait, cephadm_module, with_host, with_service
 
+
 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
 def test_upgrade_start(cephadm_module: CephadmOrchestrator):
     with with_host(cephadm_module, 'test'):
-        assert wait(cephadm_module, cephadm_module.upgrade_start('image_id', None)) == 'Initiating upgrade to image_id'
+        assert wait(cephadm_module, cephadm_module.upgrade_start(
+            'image_id', None)) == 'Initiating upgrade to image_id'
 
         assert wait(cephadm_module, cephadm_module.upgrade_status()).target_image == 'image_id'
 
         assert wait(cephadm_module, cephadm_module.upgrade_pause()) == 'Paused upgrade to image_id'
 
-        assert wait(cephadm_module, cephadm_module.upgrade_resume()) == 'Resumed upgrade to image_id'
+        assert wait(cephadm_module, cephadm_module.upgrade_resume()
+                    ) == 'Resumed upgrade to image_id'
 
         assert wait(cephadm_module, cephadm_module.upgrade_stop()) == 'Stopped upgrade to image_id'
 
@@ -29,7 +32,8 @@ def test_upgrade_run(cephadm_module: CephadmOrchestrator):
             'who': 'global',
         })
         with with_service(cephadm_module, ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr, 'test'):
-            assert wait(cephadm_module, cephadm_module.upgrade_start('to_image', None)) == 'Initiating upgrade to to_image'
+            assert wait(cephadm_module, cephadm_module.upgrade_start(
+                'to_image', None)) == 'Initiating upgrade to to_image'
 
             assert wait(cephadm_module, cephadm_module.upgrade_status()).target_image == 'to_image'
 
diff --git a/src/pybind/mgr/cephadm/tests/test_utils.py b/src/pybind/mgr/cephadm/tests/test_utils.py
index 9ae6d61590c0c..4a76482f89c7b 100644
--- a/src/pybind/mgr/cephadm/tests/test_utils.py
+++ b/src/pybind/mgr/cephadm/tests/test_utils.py
@@ -3,6 +3,7 @@ import pytest
 from orchestrator import OrchestratorError
 from cephadm.utils import name_to_auth_entity
 
+
 def test_name_to_auth_entity(fs):
     for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
 
@@ -29,6 +30,6 @@ def test_name_to_auth_entity(fs):
         assert "%s.id1" % daemon_type == name_to_auth_entity(daemon_type, "id1")
 
     with pytest.raises(OrchestratorError):
-            name_to_auth_entity("whatever", "id1", "host")
-            name_to_auth_entity("whatever", "id1", "")
-            name_to_auth_entity("whatever", "id1")
+        name_to_auth_entity("whatever", "id1", "host")
+        name_to_auth_entity("whatever", "id1", "")
+        name_to_auth_entity("whatever", "id1")
diff --git a/src/pybind/mgr/orchestrator/tests/test_orchestrator.py b/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
index b5b45a2caa36c..fff2e245ce107 100644
--- a/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
+++ b/src/pybind/mgr/orchestrator/tests/test_orchestrator.py
@@ -98,6 +98,7 @@ def some_complex_completion():
                              lambda four: four + 1))
     return c
 
+
 def test_promise_mondatic_then_combined():
     p = some_complex_completion()
     p.finalize()
@@ -135,13 +136,15 @@ def test_progress():
             completion=lambda: Completion(
                 on_complete=lambda _: progress_val))
     )
-    mgr.remote.assert_called_with('progress', 'update', c.progress_reference.progress_id, 'hello world', 0.0, [('origin', 'orchestrator')])
+    mgr.remote.assert_called_with('progress', 'update', c.progress_reference.progress_id, 'hello world', 0.0, [
+                                  ('origin', 'orchestrator')])
 
     c.finalize()
     mgr.remote.assert_called_with('progress', 'complete', c.progress_reference.progress_id)
 
     c.progress_reference.update()
-    mgr.remote.assert_called_with('progress', 'update', c.progress_reference.progress_id, 'hello world', progress_val, [('origin', 'orchestrator')])
+    mgr.remote.assert_called_with('progress', 'update', c.progress_reference.progress_id,
+                                  'hello world', progress_val, [('origin', 'orchestrator')])
     assert not c.progress_reference.effective
 
     progress_val = 1
@@ -191,8 +194,8 @@ def test_fail():
     assert isinstance(c.exception, KeyError)
 
     with pytest.raises(ValueError,
-                        match='Invalid State: called fail, but Completion is already finished: {}'.format(
-                            str(ZeroDivisionError()))):
+                       match='Invalid State: called fail, but Completion is already finished: {}'.format(
+                           str(ZeroDivisionError()))):
         c._first_promise.fail(ZeroDivisionError())
 
 
@@ -234,6 +237,7 @@ def test_pretty_print():
 
     assert p.result == 5
 
+
 def test_apply():
     to = _TestOrchestrator('', 0, 0)
     completion = to.apply([
@@ -242,7 +246,7 @@ def test_apply():
         ServiceSpec(service_type='nfs'),
     ])
     completion.finalize(42)
-    assert completion.result == [None, None, None] 
+    assert completion.result == [None, None, None]
 
 
 def test_yaml():
@@ -288,5 +292,6 @@ def test_event_multiline():
     e = OrchestratorEvent(datetime.datetime.utcnow(), 'service', 'subject', 'ERROR', 'message')
     assert OrchestratorEvent.from_json(e.to_json()) == e
 
-    e = OrchestratorEvent(datetime.datetime.utcnow(), 'service', 'subject', 'ERROR', 'multiline\nmessage')
+    e = OrchestratorEvent(datetime.datetime.utcnow(), 'service',
+                          'subject', 'ERROR', 'multiline\nmessage')
     assert OrchestratorEvent.from_json(e.to_json()) == e
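
Note: the commit message does not say how these rewrites were produced. The shape of the changes (hanging indents for long calls, spaces after commas in lambda arguments, two blank lines before top-level definitions, trailing whitespace stripped) is what an automatic PEP 8 fixer emits. A minimal sketch, assuming autopep8 and a 100-column limit; both are assumptions, not something this commit states:

    # Hypothetical sketch; autopep8 and the 100-column limit are assumptions,
    # not named anywhere in this commit.
    import autopep8

    before = "m = CephadmOrchestrator.__new__ (CephadmOrchestrator)\n"
    # fix_code() applies pycodestyle fixes (here E211, whitespace before '(')
    # without executing or importing the source being fixed.
    after = autopep8.fix_code(before, options={'max_line_length': 100})
    print(after)  # -> m = CephadmOrchestrator.__new__(CephadmOrchestrator)

Run over the files touched here (for example, autopep8 --in-place --max-line-length 100 src/pybind/mgr/cephadm/tests/*.py), the same fixer would yield diffs of this shape.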