for (map<pg_t,vector<int32_t> >::iterator p = osdmap.pg_temp->begin();
p != osdmap.pg_temp->end();
++p) {
-
- // if pool does not exist, remove any existing pg_temps associated with
- // it. we don't care about pg_temps on the pending_inc either; if there
- // are new_pg_temp entries on the pending, clear them out just as well.
- if (!osdmap.have_pg_pool(p->first.pool())) {
- ldout(cct, 10) << " removing pg_temp " << p->first
- << " for inexistent pool " << p->first.pool() << dendl;
- pending_inc->new_pg_temp[p->first].clear();
-
- } else if (pending_inc->new_pg_temp.count(p->first) == 0) {
+ if (pending_inc->new_pg_temp.count(p->first) == 0) {
vector<int> raw_up;
int primary;
osdmap.pg_to_raw_up(p->first, &raw_up, &primary);
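(note: pg_to_raw_up() yields the up set the map would compute for this PG before any
pg_temp/primary_temp override is applied. The branch that continues past this excerpt
presumably uses it to decide whether the stored pg_temp has become redundant. A minimal
sketch of that idea, assuming the empty-vector-means-erase convention for new_pg_temp;
the comparison below is illustrative only, not the exact patched code:

  // if the temp mapping matches what the map now computes anyway, the
  // pg_temp entry no longer serves a purpose and can be queued for removal
  if (raw_up.size() == p->second.size() &&
      std::equal(raw_up.begin(), raw_up.end(), p->second.begin()))
    pending_inc->new_pg_temp[p->first].clear();
)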
void OSDMap::remove_down_temps(CephContext *cct,
const OSDMap& osdmap, Incremental *pending_inc)
{
- ldout(cct, 10) << "remove_down_pg_temp" << dendl;
+ ldout(cct, 10) << __func__ << dendl;
OSDMap tmpmap;
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(*pending_inc);
for (map<pg_t,vector<int32_t> >::iterator p = tmpmap.pg_temp->begin();
p != tmpmap.pg_temp->end();
++p) {
+ // if pool does not exist, remove any existing pg_temps associated with
+ // it. we don't care about pg_temps on the pending_inc either; if there
+ // are new_pg_temp entries on the pending, clear them out just as well.
+ if (!osdmap.have_pg_pool(p->first.pool())) {
+ ldout(cct, 10) << __func__ << " removing pg_temp " << p->first
+ << " for nonexistent pool " << p->first.pool() << dendl;
+ pending_inc->new_pg_temp[p->first].clear();
+ continue;
+ }
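(note: clearing the vector rather than erasing the key is deliberate: by convention an
empty new_pg_temp entry tells apply_incremental() to drop the existing pg_temp mapping.
Roughly, the consuming side behaves like the following sketch, shown here only to make
the convention explicit:

  // in OSDMap::apply_incremental(), approximately:
  for (const auto& q : inc.new_pg_temp) {
    if (q.second.empty())
      pg_temp->erase(q.first);          // empty vector => remove the temp mapping
    else
      (*pg_temp)[q.first] = q.second;   // otherwise install/replace the mapping
  }
)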
// all osds down?
unsigned num_up = 0;
for (auto o : p->second) {