From 22c8cdad8ee1d7376c7d200bdb6ec94ed6d3b5e6 Mon Sep 17 00:00:00 2001
From: xie xingguo
Date: Wed, 20 Nov 2019 08:31:54 +0800
Subject: [PATCH] osd/PeeringState: do not exclude up from
 acting_recovery_backfill

If we choose a primary that does not belong to the current up set,
and all up peers are still recoverable, then we might end up
excluding some up peer from the acting_recovery_backfill set too
due to the "want size <= pool size" constraint
(since https://github.com/ceph/ceph/pull/24035), as a result of
which all up peers might not get recovered in one go.

Fix by falling through any oversized want set to async recovery,
which should be able to handle it nicely.

Fixes: https://tracker.ceph.com/issues/42577
Signed-off-by: xie xingguo
---
 src/osd/PeeringState.cc | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/osd/PeeringState.cc b/src/osd/PeeringState.cc
index 17030737506..a9b5b4fde41 100644
--- a/src/osd/PeeringState.cc
+++ b/src/osd/PeeringState.cc
@@ -2077,6 +2077,14 @@ bool PeeringState::choose_acting(pg_shard_t &auth_log_shard_id,
       get_osdmap());
     }
   }
+  while (want.size() > pool.info.size) {
+    // async recovery should have taken out as many osds as it can.
+    // if not, then always evict the last peer
+    // (will get synchronously recovered later)
+    psdout(10) << __func__ << " evicting osd." << want.back()
+               << " from oversized want " << want << dendl;
+    want.pop_back();
+  }
   if (want != acting) {
     psdout(10) << __func__ << " want " << want << " != acting " << acting
                << ", requesting pg_temp change" << dendl;
-- 
2.39.5
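
Note for readers: the following is a minimal, self-contained sketch of
the eviction loop in the hunk above, runnable outside of Ceph. It is an
illustration only, not the actual Ceph code path: a plain int stands in
for pg_shard_t, a hard-coded pool_size of 3 stands in for
pool.info.size (the pool's replica count), the OSD ids are made up, and
std::cout replaces the psdout logging macro.

// Standalone sketch of the "evict from oversized want" loop, with
// Ceph internals replaced by plain stand-ins (see note above).
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical oversized want set: five candidate OSDs left after
  // async recovery, for a replicated pool of size 3.
  std::vector<int> want{4, 1, 7, 2, 9};
  const std::size_t pool_size = 3;

  // Mirror of the patch: async recovery has already taken out as many
  // OSDs as it can; if the want set is still larger than the pool
  // size, evict peers from the back until the "want size <= pool size"
  // constraint holds. Evicted peers are not lost; they get recovered
  // synchronously later.
  while (want.size() > pool_size) {
    std::cout << "evicting osd." << want.back()
              << " from oversized want set\n";
    want.pop_back();
  }

  std::cout << "final want set:";
  for (int osd : want)
    std::cout << " osd." << osd;
  std::cout << '\n';
}

Evicting from the back (rather than re-sorting) works because earlier
selection steps already placed the most important peers, such as the
primary and the best-log shards, at the front of want.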