- ceph nfs cluster ls | grep foo
- ceph nfs export ls foo --detailed
- rados -p .nfs --all ls -
- - ceph config get mgr mgr/cephadm/migration_current | grep 3
+ - ceph config get mgr mgr/cephadm/migration_current | grep 4
if TYPE_CHECKING:
from .module import CephadmOrchestrator
-LAST_MIGRATION = 3
+LAST_MIGRATION = 4
logger = logging.getLogger(__name__)
self.nfs_migration_queue = json.loads(v) if v else []
# for some migrations, we don't need to do anything except for
- # setting migration_current = 1.
+ # incrementing migration_current.
# let's try to shortcut things here.
self.migrate(True)
if self.migrate_2_3():
self.set(3)
+ if self.mgr.migration_current == 3:
+ if self.migrate_3_4():
+ self.set(4)
+
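Each step in this chain calls `self.set()` only when its `migrate_N_M()` helper returns True, so a step that fails leaves `migration_current` untouched. As a minimal sketch of the pattern only (the `migrate_4_5` name is hypothetical and not part of this change), a future step would be wired in the same way:

    # Hypothetical follow-up step -- migrate_4_5 does not exist in this change;
    # it only illustrates the pattern the chain above follows.
    if self.mgr.migration_current == 4:
        if self.migrate_4_5():
            self.set(5)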
def migrate_0_1(self) -> bool:
"""
Migration 0 -> 1
self.mgr.log.warning(f'Failed to migrate export ({ret}): {err}\nExport was:\n{ex}')
self.mgr.log.info(f'Done migrating nfs.{service_id}')
+ def migrate_3_4(self) -> bool:
+ # We can't automatically apply the _admin label to any existing host here,
+ # but `ceph orch host rm ...` now refuses to remove the last _admin host
+ # unless --force is given.
+ if 'client.admin' not in self.mgr.keys.keys:
+ self.mgr._client_keyring_set(
+ entity='client.admin',
+ placement='label:_admin',
+ )
+ return True
+
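The `'label:_admin'` placement string uses the same placement syntax cephadm accepts elsewhere. A minimal sketch of what that string resolves to, assuming `ceph.deployment.service_spec` is importable and that `PlacementSpec.from_string` is the parser involved (an assumption, not something shown in this diff):

    # Minimal sketch: parse the placement string used by migrate_3_4 above.
    # Assumes ceph.deployment.service_spec is available on the Python path.
    from ceph.deployment.service_spec import PlacementSpec

    placement = PlacementSpec.from_string('label:_admin')
    assert placement.label == '_admin'  # the keyring follows hosts carrying this label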
def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
"""
for d in daemons:
daemons_table += "{:<20} {:<15}\n".format(d.daemon_type, d.daemon_id)
- return "Not allowed to remove %s from cluster. " \
- "The following daemons are running in the host:" \
- "\n%s\nPlease run 'ceph orch host drain %s' to remove daemons from host" % (
- host, daemons_table, host)
+ raise OrchestratorValidationError("Not allowed to remove %s from cluster. "
+ "The following daemons are running in the host:"
+ "\n%s\nPlease run 'ceph orch host drain %s' to remove daemons from host" % (
+ host, daemons_table, host))
+
+ # check if we're removing the last _admin host
+ if not force:
+ p = PlacementSpec(label='_admin')
+ admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs())
+ if len(admin_hosts) == 1 and admin_hosts[0] == host:
+ raise OrchestratorValidationError(f"Host {host} is the last host with the '_admin'"
+ " label. Please add the '_admin' label to a host"
+ " or add --force to this command")
def run_cmd(cmd_args: dict) -> None:
ret, out, err = self.mon_command(cmd_args)
@contextmanager
-def with_host(m: CephadmOrchestrator, name, addr='1::4', refresh_hosts=True):
- # type: (CephadmOrchestrator, str) -> None
+def with_host(m: CephadmOrchestrator, name, addr='1::4', refresh_hosts=True, rm_with_force=True):
with mock.patch("cephadm.utils.resolve_ip", return_value=addr):
wait(m, m.add_host(HostSpec(hostname=name)))
if refresh_hosts:
CephadmServe(m)._refresh_hosts_and_daemons()
receive_agent_metadata(m, name)
yield
- wait(m, m.remove_host(name))
+ wait(m, m.remove_host(name, force=rm_with_force))
def assert_rm_service(cephadm: CephadmOrchestrator, srv_name):
assert len(cephadm_module.cache.get_daemons_by_type('mgr')) == 3
assert len(cephadm_module.cache.get_daemons_by_type('crash')) == 1
+ cephadm_module.offline_hosts = {}
+
@mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@mock.patch("cephadm.CephadmOrchestrator._host_ok_to_stop")
@mock.patch("cephadm.module.HostCache.get_daemon_types")
with with_osd_daemon(cephadm_module, _run_cephadm, 'test', 1, ceph_volume_lvm_list=_ceph_volume_list):
pass
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+ def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator):
+ with pytest.raises(OrchestratorError):
+ with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False):
+ cephadm_module.inventory.add_label('test', '_admin')
+ pass
+ assert False
+ with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True):
+ with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False):
+ cephadm_module.inventory.add_label('test2', '_admin')
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
+from cephadm.migrations import LAST_MIGRATION
from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts
from cephadm.serve import CephadmServe
from tests import mock
assert cephadm_module.migration_current == 2
cephadm_module.migration.migrate()
- assert cephadm_module.migration_current == 3
+ assert cephadm_module.migration_current == LAST_MIGRATION
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
assert cephadm_module.migration_current == 2
cephadm_module.migration.migrate()
- assert cephadm_module.migration_current == 3
+ assert cephadm_module.migration_current == LAST_MIGRATION
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
+ assert 'client.admin' not in cephadm_module.keys.keys
+
+ cephadm_module.migration_current = 3
+ cephadm_module.migration.migrate()
+ assert cephadm_module.migration_current == LAST_MIGRATION
+
+ assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'