return new ECRecoveryHandle;
}
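+ // Called when a recovery read returns an error: cancel the in-flight
+ // recovery op for this object and report the failing shards to the parent.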
+void ECBackend::_failed_push(const hobject_t &hoid,
+ pair<RecoveryMessages *, ECBackend::read_result_t &> &in)
+{
+ ECBackend::read_result_t &res = in.second;
+ dout(10) << __func__ << ": Read error " << hoid << " r="
+ << res.r << " errors=" << res.errors << dendl;
+ dout(10) << __func__ << ": canceling recovery op for obj " << hoid
+ << dendl;
+ assert(recovery_ops.count(hoid));
+ recovery_ops.erase(hoid);
+
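+ // Collect every shard that reported a read error so the parent can stop
+ // using them as recovery sources for this object.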
+ list<pg_shard_t> fl;
+ for (auto&& i : res.errors) {
+ fl.push_back(i.first);
+ }
+ get_parent()->failed_push(fl, hoid);
+}
+
struct OnRecoveryReadComplete :
public GenContext<pair<RecoveryMessages*, ECBackend::read_result_t& > &> {
ECBackend *pg;
hobject_t hoid;
OnRecoveryReadComplete(ECBackend *pg, const hobject_t &hoid)
: pg(pg), hoid(hoid) {}
void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
ECBackend::read_result_t &res = in.second;
- // FIXME???
- assert(res.r == 0);
- assert(res.errors.empty());
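+ // A read error or non-zero return code means this object cannot be
+ // recovered from the chosen shards; let _failed_push cancel the op
+ // and report the failing shards.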
+ if (!(res.r == 0 && res.errors.empty())) {
+ pg->_failed_push(hoid, in);
+ return;
+ }
assert(res.returned.size() == 1);
pg->handle_recovery_read_complete(
hoid,
unsigned is_complete = 0;
// For redundant reads check for completion as each shard comes in,
// or in a non-recovery read, check for completion once all the shards have been read.
+ // TODO: It would be nice if recovery could send more reads too
if (rop.do_redundant_reads || (!rop.for_recovery && rop.in_progress.empty())) {
for (map<hobject_t, read_result_t>::const_iterator iter =
rop.complete.begin();
uint64_t be_get_ondisk_size(uint64_t logical_size) {
return sinfo.logical_to_next_chunk_offset(logical_size);
}
+ void _failed_push(const hobject_t &hoid,
+ pair<RecoveryMessages *, ECBackend::read_result_t &> &in);
};
#endif
pg_shard_t peer,
const hobject_t oid) = 0;
- virtual void failed_push(pg_shard_t from, const hobject_t &soid) = 0;
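+ // from lists every shard that failed to provide data for soid.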
+ virtual void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) = 0;
virtual void cancel_pull(const hobject_t &soid) = 0;
void ReplicatedBackend::_failed_push(pg_shard_t from, const hobject_t &soid)
{
- get_parent()->failed_push(from, soid);
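+ // Replicated pools pull an object from a single peer, so wrap that peer
+ // in a one-element list to match the updated interface.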
+ list<pg_shard_t> fl = { from };
+ get_parent()->failed_push(fl, soid);
pull_from_peer[from].erase(soid);
if (pull_from_peer[from].empty())
pull_from_peer.erase(from);
}
}
-
-void ReplicatedPG::failed_push(pg_shard_t from, const hobject_t &soid)
+void ReplicatedPG::failed_push(const list<pg_shard_t> &from, const hobject_t &soid)
{
assert(recovering.count(soid));
recovering.erase(soid);
- missing_loc.remove_location(soid, from);
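+ // Remove each failed shard as a candidate location for the object.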
+ for (auto&& i : from)
+ missing_loc.remove_location(soid, i);
dout(0) << __func__ << " " << soid << " from shards " << from
<< ", reps on " << missing_loc.get_locations(soid)
<< " unfound? " << missing_loc.is_unfound(soid) << dendl;
void on_global_recover(
const hobject_t &oid,
const object_stat_sum_t &stat_diff) override;
- void failed_push(pg_shard_t from, const hobject_t &soid) override;
+ void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) override;
void cancel_pull(const hobject_t &soid) override;
template<class T> class BlessedGenContext;