mgr/cephadm: Add client.admin keyring when upgrading from older version
author     Sebastian Wagner <sewagner@redhat.com>
           Mon, 13 Sep 2021 14:03:02 +0000 (16:03 +0200)
committer  Sebastian Wagner <sewagner@redhat.com>
           Mon, 29 Nov 2021 10:51:11 +0000 (11:51 +0100)
Signed-off-by: Sebastian Wagner <sewagner@redhat.com>
qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml
src/pybind/mgr/cephadm/migrations.py
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/fixtures.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/cephadm/tests/test_migration.py

diff --git a/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml b/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml
index 11e8bb3b8d8cadc1c6182188afb0813dbd050631..b1957d9d62880f3febb0fc9eba88a800ac994f5b 100644
@@ -7,4 +7,4 @@ tasks:
       - ceph nfs cluster ls | grep foo
       - ceph nfs export ls foo --detailed
       - rados -p .nfs --all ls -
-      - ceph config get mgr mgr/cephadm/migration_current | grep 3
+      - ceph config get mgr mgr/cephadm/migration_current | grep 4
diff --git a/src/pybind/mgr/cephadm/migrations.py b/src/pybind/mgr/cephadm/migrations.py
index e5a73f306896e10379d976a9686c848da7c696fe..b66eedb6907ac7abc7a6dfa3bc6c9620a56b95de 100644
@@ -12,7 +12,7 @@ from orchestrator import OrchestratorError, DaemonDescription
 if TYPE_CHECKING:
     from .module import CephadmOrchestrator
 
-LAST_MIGRATION = 3
+LAST_MIGRATION = 4
 
 logger = logging.getLogger(__name__)
 
@@ -38,7 +38,7 @@ class Migrations:
         self.nfs_migration_queue = json.loads(v) if v else []
 
         # for some migrations, we don't need to do anything except for
-        # setting migration_current = 1.
+        # incrementing migration_current.
         # let's try to shortcut things here.
         self.migrate(True)
 
@@ -68,6 +68,10 @@ class Migrations:
             if self.migrate_2_3():
                 self.set(3)
 
+        if self.mgr.migration_current == 3:
+            if self.migrate_3_4():
+                self.set(4)
+
     def migrate_0_1(self) -> bool:
         """
         Migration 0 -> 1
@@ -259,6 +263,16 @@ class Migrations:
                 self.mgr.log.warning(f'Failed to migrate export ({ret}): {err}\nExport was:\n{ex}')
         self.mgr.log.info(f'Done migrating nfs.{service_id}')
 
+    def migrate_3_4(self) -> bool:
+        # We can't automatically label any host with '_admin' here, but
+        # `ceph orch host rm` now complains when removing the last '_admin' host.
+        if 'client.admin' not in self.mgr.keys.keys:
+            self.mgr._client_keyring_set(
+                entity='client.admin',
+                placement='label:_admin',
+            )
+        return True
+
 
 def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
     """
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index c5f9d634d96d17bca5cf3d1c216ca3d69cbe541c..414013ad5a017f5133bd573911f8f00e813a9334 100644
@@ -1402,10 +1402,19 @@ Then run the following:
                 for d in daemons:
                     daemons_table += "{:<20} {:<15}\n".format(d.daemon_type, d.daemon_id)
 
-                return "Not allowed to remove %s from cluster. " \
-                    "The following daemons are running in the host:" \
-                    "\n%s\nPlease run 'ceph orch host drain %s' to remove daemons from host" % (
-                        host, daemons_table, host)
+                raise OrchestratorValidationError("Not allowed to remove %s from cluster. "
+                                                  "The following daemons are running in the host:"
+                                                  "\n%s\nPlease run 'ceph orch host drain %s' to remove daemons from host" % (
+                                                      host, daemons_table, host))
+
+        # check if we're removing the last _admin host
+        if not force:
+            p = PlacementSpec(label='_admin')
+            admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs())
+            if len(admin_hosts) == 1 and admin_hosts[0] == host:
+                raise OrchestratorValidationError(f"Host {host} is the last host with the '_admin'"
+                                                  " label. Please add the '_admin' label to a host"
+                                                  " or add --force to this command")
 
         def run_cmd(cmd_args: dict) -> None:
             ret, out, err = self.mon_command(cmd_args)
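
The new guard in remove_host() resolves the '_admin' label against the inventory (via PlacementSpec.filter_matching_hostspecs in the real code) and refuses to drop the only remaining '_admin' host unless --force is given. A rough, self-contained sketch of just that check follows, assuming a plain host-to-labels mapping instead of the real inventory and exception types.

    # Simplified version of the "last _admin host" guard: refuse to remove the
    # only host that still carries the '_admin' label unless force=True.
    # The real code raises OrchestratorValidationError; ValueError stands in here.
    from typing import Dict, List


    def check_not_last_admin_host(host: str,
                                  labels_by_host: Dict[str, List[str]],
                                  force: bool = False) -> None:
        if force:
            return
        admin_hosts = [h for h, labels in labels_by_host.items() if '_admin' in labels]
        if len(admin_hosts) == 1 and admin_hosts[0] == host:
            raise ValueError(
                f"Host {host} is the last host with the '_admin' label. "
                "Please add the '_admin' label to a host or add --force to this command")


    inventory = {'node1': ['_admin'], 'node2': []}
    check_not_last_admin_host('node2', inventory)               # fine, node2 is not _admin
    check_not_last_admin_host('node1', inventory, force=True)   # allowed with --force
    try:
        check_not_last_admin_host('node1', inventory)           # last _admin host -> rejected
    except ValueError as err:
        print(err)
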
diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py
index 385a93a132da4f0747ccd0a291e7f1f9fff19974..75ce9900661b0d36bd75c5484c56b6af7447020b 100644
@@ -129,15 +129,14 @@ def wait(m, c):
 
 
 @contextmanager
-def with_host(m: CephadmOrchestrator, name, addr='1::4', refresh_hosts=True):
-    # type: (CephadmOrchestrator, str) -> None
+def with_host(m: CephadmOrchestrator, name, addr='1::4', refresh_hosts=True, rm_with_force=True):
     with mock.patch("cephadm.utils.resolve_ip", return_value=addr):
         wait(m, m.add_host(HostSpec(hostname=name)))
         if refresh_hosts:
             CephadmServe(m)._refresh_hosts_and_daemons()
             receive_agent_metadata(m, name)
         yield
-        wait(m, m.remove_host(name))
+        wait(m, m.remove_host(name, force=rm_with_force))
 
 
 def assert_rm_service(cephadm: CephadmOrchestrator, srv_name):
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 41513a7fea96b21d2d454a633cb53b870b0abdea..889192a332be6a29a507dd06abf518ca586d6e22 100644
@@ -1356,6 +1356,8 @@ class TestCephadm(object):
                             assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
                             assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1
 
+                        cephadm_module.offline_hosts = {}
+
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
     @mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
     @mock.patch("cephadm.module.HostCache.get_daemon_types")
@@ -1663,3 +1665,14 @@ Traceback (most recent call last):
 
             with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
                 pass
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
+        with pytest.raises(OrchestratorError):
+            with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
+                cephadm_module.inventory.add_label('test', '_admin')
+                pass
+            assert False
+        with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
+            with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
+                cephadm_module.inventory.add_label('test2', '_admin')
diff --git a/src/pybind/mgr/cephadm/tests/test_migration.py b/src/pybind/mgr/cephadm/tests/test_migration.py
index ce35672e49d9dbe0131c8a8a899e46e043320f4e..1c73897cb852734e3080ac31dcd1540b99c98d73 100644
@@ -4,6 +4,7 @@ from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlaceme
 from ceph.utils import datetime_to_str, datetime_now
 from cephadm import CephadmOrchestrator
 from cephadm.inventory import SPEC_STORE_PREFIX
+from cephadm.migrations import LAST_MIGRATION
 from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts
 from cephadm.serve import CephadmServe
 from tests import mock
@@ -182,7 +183,7 @@ def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
         assert cephadm_module.migration_current == 2
 
         cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == 3
+        assert cephadm_module.migration_current == LAST_MIGRATION
 
 
 @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
@@ -215,4 +216,15 @@ def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
         assert cephadm_module.migration_current == 2
 
         cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == 3
+        assert cephadm_module.migration_current == LAST_MIGRATION
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
+    assert 'client.admin' not in cephadm_module.keys.keys
+
+    cephadm_module.migration_current = 3
+    cephadm_module.migration.migrate()
+    assert cephadm_module.migration_current == LAST_MIGRATION
+
+    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'
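
The new unit test drives the ladder end to end: it pins migration_current back to 3, calls migrate(), and asserts both that the level reaches LAST_MIGRATION and that the client.admin keyring ends up placed by 'label:_admin'. The sketch below mimics that flow with a miniature stand-in class; the names are illustrative only, while the real test runs against the cephadm_module fixture with the mocks shown above.

    # Self-contained imitation of test_migrate_admin_client_keyring: a tiny
    # migration ladder that starts at level 3 and registers client.admin on
    # its way to LAST_MIGRATION.
    from typing import Dict

    LAST_MIGRATION = 4


    class MiniMigrations:
        def __init__(self) -> None:
            self.migration_current = 3
            self.keys: Dict[str, str] = {}      # entity -> placement

        def migrate_3_4(self) -> bool:
            if 'client.admin' not in self.keys:
                self.keys['client.admin'] = 'label:_admin'
            return True

        def migrate(self) -> None:
            if self.migration_current == 3:
                if self.migrate_3_4():
                    self.migration_current = 4


    m = MiniMigrations()
    assert 'client.admin' not in m.keys
    m.migrate()
    assert m.migration_current == LAST_MIGRATION
    assert m.keys['client.admin'] == 'label:_admin'
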