return true;
// Object is degraded if after last_backfill AND
- // we have are backfilling it
+ // we are backfilling it
if (peer == backfill_target &&
peer_info[peer].last_backfill <= soid &&
- backfill_pos >= soid &&
+ last_backfill_started >= soid &&
backfills_in_flight.count(soid))
return true;
}
}
}
- backfill_pos = backfill_info.begin > peer_backfill_info.begin ?
- peer_backfill_info.begin : backfill_info.begin;
- dout(10) << " backfill_pos now " << backfill_pos << dendl;
-
assert(waiting_on_backfill);
waiting_on_backfill = false;
finish_recovery_op(bi.begin);
osd_reqid_t(), ctx->new_obs.oi.mtime));
::encode(snaps, ctx->log.back().snaps);
- /**
- * In order to keep backfill_info up to date and avoid skipping this
- * new clone, we must add the clone to backfill_pos. Furthermore, this
- * is the only way in which an object might be created prior to backfill_pos
- * without having the transaction shipped to the replica
- */
- if (ctx->obs->oi.soid == backfill_pos) {
- backfill_info.objects.insert(make_pair(coid, ctx->at_version));
- backfill_info.begin = coid;
- backfill_pos = backfill_info.begin;
- }
-
ctx->at_version.version++;
}
pg_info_t& pinfo = peer_info[backfill_target];
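+ // objects at or before the peer's last_backfill are already reflected in
+ // its stats; objects in (last_backfill, last_backfill_started] accumulate
+ // in pending_backfill_updates until the peer catches up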
if (soid <= pinfo.last_backfill)
pinfo.stats.stats.add(ctx->delta_stats, ctx->obc->obs.oi.category);
- else if (soid < backfill_pos)
+ else if (soid <= last_backfill_started)
pending_backfill_updates[soid].stats.add(ctx->delta_stats, ctx->obc->obs.oi.category);
}
}
// ship resulting transaction, log entries, and pg_stats
- if (peer == backfill_target && soid >= backfill_pos &&
- soid.pool == (int64_t)info.pgid.pool()) { // only skip normal (not temp pool=-1) objects
- dout(10) << "issue_repop shipping empty opt to osd." << peer << ", object beyond backfill_pos "
- << backfill_pos << ", last_backfill is " << pinfo.last_backfill << dendl;
+ if (peer == backfill_target && soid > last_backfill_started &&
+ // only skip normal (not temp pool=-1) objects
+ soid.pool == (int64_t)info.pgid.pool()) {
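+ // the backfill target does not have this object yet, so ship only an
+ // empty transaction; the object itself will arrive via backfill later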
+ dout(10) << "issue_repop shipping empty opt to osd." << peer
+ <<", object beyond last_backfill_started"
+ << last_backfill_started << ", last_backfill is "
+ << pinfo.last_backfill << dendl;
ObjectStore::Transaction t;
::encode(t, wr->get_data());
} else {
if (peer_info[acting[i]].last_backfill != hobject_t::get_max()) {
assert(backfill_target == -1);
backfill_target = acting[i];
- backfill_pos = peer_info[acting[i]].last_backfill;
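+ // start the backfill cursor from what this target already has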
+ last_backfill_started = peer_info[acting[i]].last_backfill;
dout(10) << " chose backfill target osd." << backfill_target
- << " from " << backfill_pos << dendl;
+ << " from " << last_backfill_started << dendl;
}
}
}
#ifdef DEBUG_RECOVERY_OIDS
recovering_oids.clear();
#endif
- backfill_pos = hobject_t();
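+ // reset the backfill cursor along with the rest of the in-flight state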
+ last_backfill_started = hobject_t();
list<OpRequestRef> blocked_ops;
set<hobject_t>::iterator i = backfills_in_flight.begin();
while (i != backfills_in_flight.end()) {
}
dout(10) << " peer osd." << backfill_target
- << " pos " << backfill_pos
+ << " last_backfill_started " << last_backfill_started
<< " info " << pinfo
<< " interval " << pbi.begin << "-" << pbi.end
<< " " << pbi.objects.size() << " objects" << dendl;
// update our local interval to cope with recent changes
- backfill_info.begin = backfill_pos;
+ backfill_info.begin = last_backfill_started;
update_range(&backfill_info, handle);
int ops = 0;
map<hobject_t, eversion_t> to_remove;
set<hobject_t> add_to_stat;
- pbi.trim();
- backfill_info.trim();
+ pbi.trim_to(last_backfill_started);
+ backfill_info.trim_to(last_backfill_started);
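+ // backfill_pos is now just a local: the lower of the local and peer scan
+ // positions, computed here instead of being tracked as a member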
+ hobject_t backfill_pos = backfill_info.begin > pbi.begin ?
+ pbi.begin : backfill_info.begin;
while (ops < max) {
if (backfill_info.begin <= pbi.begin &&
!backfill_info.extends_to_end() && backfill_info.empty()) {
waiting_for_degraded_object[pbi.begin]);
waiting_for_degraded_object.erase(pbi.begin);
}
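+ // this peer-only object is being removed rather than pushed, but it still
+ // counts as started for the backfill cursor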
+ last_backfill_started = pbi.begin;
pbi.pop_front();
// Don't increment ops here because deletions
// are cheap and not replied to unlike real recovery_ops,
waiting_for_degraded_object.erase(pbi.begin);
}
}
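+ // this object has been handled; record it as started and advance both
+ // scan fronts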
+ last_backfill_started = pbi.begin;
add_to_stat.insert(pbi.begin);
backfill_info.pop_front();
pbi.pop_front();
eversion_t(),
obc);
add_to_stat.insert(backfill_info.begin);
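+ // this local object is being pushed to the backfill target; record it
+ // as started before moving the local scan front forward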
+ last_backfill_started = backfill_info.begin;
backfill_info.pop_front();
ops++;
} else {
* - are on the peer
* - are included in the peer stats
*
- * objects between last_backfill and backfill_pos
+ * objects \in (last_backfill, last_backfill_started]
* - are on the peer or are in backfills_in_flight
* - are not included in pg stats (yet)
* - have their stats in pending_backfill_updates on the primary
void dump_recovery_info(Formatter *f) const {
f->dump_int("backfill_target", get_backfill_target());
f->dump_int("waiting_on_backfill", waiting_on_backfill);
- f->dump_stream("backfill_pos") << backfill_pos;
+ f->dump_stream("last_backfill_started") << last_backfill_started;
{
f->open_object_section("backfill_info");
backfill_info.dump(f);
}
}
- /// leading edge of backfill
- hobject_t backfill_pos;
+ /// last backfill operation started
+ hobject_t last_backfill_started;
int prep_object_replica_pushes(const hobject_t& soid, eversion_t v,
PGBackend::RecoveryHandle *h);