from cephadm.ssh import HostConnectionError
from orchestrator import OrchestratorError, DaemonDescription, DaemonDescriptionStatus, daemon_type_to_service
+from mgr_module import MonCommandFailed
+
if TYPE_CHECKING:
from .module import CephadmOrchestrator
if osd_min < int(target_major):
logger.info(
f'Upgrade: Setting require_osd_release to {target_major} {target_major_name}')
- ret, _, err = self.mgr.check_mon_command({
- 'prefix': 'osd require-osd-release',
- 'release': target_major_name,
- })
+ try:
+ ret, _, err = self.mgr.check_mon_command({
+ 'prefix': 'osd require-osd-release',
+ 'release': target_major_name,
+ })
+ except MonCommandFailed as e:
+ # `ceph osd require-osd-release` was recently changed to fail when run on a
+ # cluster where no OSDs are up unless --yes-i-really-mean-it is passed. If we
+ # hit that specific failure and there are genuinely no OSD daemons in the
+ # cluster, just retry the command with the flag set.
+ if "no OSDs are up" in str(e):
+ if not self.mgr.cache.get_daemons_by_type('osd'):
+ # this is the case where we actually have no OSDs in the cluster
+ ret, _, err = self.mgr.check_mon_command({
+ 'prefix': 'osd require-osd-release',
+ 'release': target_major_name,
+ 'yes_i_really_mean_it': True
+ })
+ else:
+ # this is the case where we do have OSDs listed, but none of them are up
+ raise OrchestratorError(
+ 'All OSDs are down, which prevents setting the minimum required OSD release. '
+ 'If you are sure you\'d like to move forward, please run '
+ f'"ceph osd require-osd-release {target_major_name} --yes-i-really-mean-it" '
+ 'and then resume the upgrade')
+ else:
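+ # any other mon command failure is unrelated to missing OSDs; re-raise it unchanged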
+ raise
def _complete_mds_upgrade(self) -> None:
assert self.upgrade_state is not None