ceph_assert(rop.in_progress.count(from));
rop.in_progress.erase(from);
unsigned is_complete = 0;
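+ // Set if send_all_remaining_reads() queues additional reads; do_read_op() is called once below to dispatch them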
+ bool need_resend = false;
// For redundant reads check for completion as each shard comes in,
// or in a non-recovery read check for completion once all the shards read.
if (rop.do_redundant_reads || rop.in_progress.empty()) {
if (!rop.do_redundant_reads) {
int r = send_all_remaining_reads(iter->first, rop);
if (r == 0) {
- // We added to in_progress and not incrementing is_complete
+ // We changed the rop's to_read and are not incrementing is_complete
+ need_resend = true;
continue;
}
// Couldn't read any additional shards so handle as completed with errors
rop.complete[iter->first].errors.clear();
}
}
+ // Avoid re-reading a completed object when we send remaining reads for uncompleted objects
+ rop.to_read.at(iter->first).need.clear();
+ rop.to_read.at(iter->first).want_attrs = false;
++is_complete;
}
}
}
- if (rop.in_progress.empty() || is_complete == rop.complete.size()) {
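+ // Resend any reads queued by send_all_remaining_reads() above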
+ if (need_resend) {
+ do_read_op(rop);
+ } else if (rop.in_progress.empty() ||
+ is_complete == rop.complete.size()) {
dout(20) << __func__ << " Complete: " << rop << dendl;
rop.trace.event("ec read complete");
complete_read_op(rop, m);
shards,
want_attrs,
c)));
- do_read_op(rop);
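+ // The caller now resends the rop via do_read_op() once it has finished checking for completion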
return 0;
}
};
struct read_request_t {
const std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > to_read;
- const std::map<pg_shard_t, std::vector<std::pair<int, int>>> need;
- const bool want_attrs;
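+ // Not const: need and want_attrs are cleared once an object's read completes, so it is not re-read on resend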
+ std::map<pg_shard_t, std::vector<std::pair<int, int>>> need;
+ bool want_attrs;
GenContext<std::pair<RecoveryMessages *, read_result_t& > &> *cb;
read_request_t(
const std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > &to_read,