bool is_repair) {
if (get_parent()->check_failsafe_full()) {
dout(10) << __func__ << " Out of space (failsafe) processing push request."
- << dendl;
+ << dendl;
ceph_abort();
}
}
if (op.before_progress.first) {
- ceph_assert(op.attrset.count(string("_")));
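+ // OI_ATTR ("_") is the xattr key that stores the encoded object_info_t.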
+ ceph_assert(op.attrset.contains(OI_ATTR));
m->t.setattrs(
coll,
tobj,
}
void ECBackend::RecoveryBackend::handle_recovery_push_reply(
- const PushReplyOp &op,
- pg_shard_t from,
- RecoveryMessages *m) {
+ const PushReplyOp &op,
+ pg_shard_t from,
+ RecoveryMessages *m) {
if (!recovery_ops.count(op.soid))
return;
RecoveryOp &rop = recovery_ops[op.soid];
}
void ECBackend::RecoveryBackend::handle_recovery_read_complete(
- const hobject_t &hoid,
- ECUtil::shard_extent_map_t &&buffers_read,
- std::optional<map<string, bufferlist, less<>>> attrs,
- const ECUtil::shard_extent_set_t &want_to_read,
- RecoveryMessages *m) {
+ const hobject_t &hoid,
+ ECUtil::shard_extent_map_t &&buffers_read,
+ std::optional<map<string, bufferlist, less<>>> attrs,
+ const ECUtil::shard_extent_set_t &want_to_read,
+ RecoveryMessages *m) {
dout(10) << __func__ << ": returned " << hoid << " " << buffers_read << dendl;
ceph_assert(recovery_ops.contains(hoid));
RecoveryBackend::RecoveryOp &op = recovery_ops[hoid];
}
}
- dout(20) << __func__ << ": oid=" << op.hoid << " "
- << op.returned_data->debug_string(2048, 8) << dendl;
+ dout(20) << __func__ << ": oid=" << op.hoid << dendl;
+ dout(30) << __func__ << " EC_DEBUG_BUFFERS: "
+ << op.returned_data->debug_string(2048, 8)
+ << dendl;
continue_recovery_op(op, m);
}
<< dendl;
} else {
get_parent()->clog_error() << "Error " << r
- << " reading object "
- << hoid;
+ << " reading object " << hoid;
dout(5) << __func__ << ": Error " << r
<< " reading " << hoid << dendl;
}
if (!hinfo) {
r = -EIO;
get_parent()->clog_error() << "Corruption detected: object "
- << hoid
- << " is missing hash_info";
+ << hoid << " is missing hash_info";
dout(5) << __func__ << ": No hinfo for " << hoid << dendl;
goto error;
}
<< hex << h.digest() << " expected 0x" << hinfo->
get_chunk_hash(shard) << dec;
dout(5) << __func__ << ": Bad hash for " << hoid << " digest 0x"
- << hex << h.digest() << " expected 0x" << hinfo->
-get_chunk_hash(shard) << dec << dendl;
+ << hex << h.digest() << " expected 0x"
+ << hinfo->get_chunk_hash(shard) << dec << dendl;
r = -EIO;
goto error;
}
}
void ECBackend::handle_sub_read_reply(
- pg_shard_t from,
- ECSubReadReply &op,
- const ZTracer::Trace &trace) {
+ pg_shard_t from,
+ ECSubReadReply &op,
+ const ZTracer::Trace &trace) {
trace.event("ec sub read reply");
dout(10) << __func__ << ": reply " << op << dendl;
map<ceph_tid_t, ReadOp>::iterator iter = read_pipeline.tid_to_read_map.
for (auto &&[hoid, attr]: op.attrs_read) {
ceph_assert(!op.errors.count(hoid));
// if read error better not have sent an attribute
- if (!rop.to_read.count(hoid)) {
+ if (!rop.to_read.contains(hoid)) {
// We canceled this read! @see filter_read_op
dout(20) << __func__ << " to_read skipping" << dendl;
continue;
rop.to_read.at(oid).shard_want_to_read.
populate_shard_id_set(want_to_read);
+ dout(20) << __func__ << " read_result: " << read_result << dendl;
+
int err = ec_impl->minimum_to_decode(want_to_read, have, dummy_minimum,
nullptr);
if (err) {
if (for_recovery) {
for (auto &&pg_shard: get_parent()->get_backfill_shards()) {
- if (error_shards && error_shards->contains(pg_shard))
+ if (error_shards && error_shards->contains(pg_shard)) {
continue;
+ }
const shard_id_t &shard = pg_shard.shard;
if (have.contains(shard)) {
ceph_assert(shards.contains(shard));
(*need_sub_chunks)[i] = subchunks_list;
}
}
- for (auto &&i: have) {
- need_set.insert(i);
- }
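+ // Add every shard we already have to the needed set in a single call.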
+ need_set.insert(have);
}
extent_set extra_extents;
extent_map result;
if (res.r == 0) {
ceph_assert(res.errors.empty());
-#if DEBUG_EC_BUFFERS
- dout(20) << __func__ << ": before decode: " << res.buffers_read.debug_string(2048, 8) << dendl;
-#endif
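+ // Dump the shard buffers before and after decode at level 30 for deep debugging.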
+ dout(30) << __func__ << ": before decode: "
+ << res.buffers_read.debug_string(2048, 8)
+ << dendl;
/* Decode any missing buffers */
int r = res.buffers_read.decode(read_pipeline.ec_impl,
req.shard_want_to_read,
req.object_size);
ceph_assert( r == 0 );
-
-#if DEBUG_EC_BUFFERS
- dout(20) << __func__ << ": after decode: " << res.buffers_read.debug_string(2048, 8) << dendl;
-#endif
+ dout(30) << __func__ << ": after decode: "
+ << res.buffers_read.debug_string(2048, 8)
+ << dendl;
for (auto &&read: req.to_read) {
result.insert(read.offset, read.size,
if (transaction.empty()) {
dout(20) << __func__ << " Transaction for osd." << pg_shard.osd << " shard " << shard << " is empty" << dendl;
} else {
+ // NOTE: All code between dout and dendl is only executed when the
+ // corresponding debug level is enabled.
dout(20) << __func__ << " Transaction for osd." << pg_shard.osd << " shard " << shard << " contents ";
Formatter *f = Formatter::create("json");
f->open_object_section("t");
if (op.skip_transaction(pending_roll_forward, shard, transaction)) {
// Must be an empty transaction
ceph_assert(transaction.empty());
- dout(20) << __func__ << " Skipping transaction for osd." << shard << dendl;
+ dout(20) << __func__ << " Skipping transaction for shard " << shard << dendl;
continue;
}
op.pending_commits++;
DoutPrefixProvider *dpp,
const OSDMapRef &osdmap
) override {
- // NOP, as -- in constrast to ECClassicalOp -- there is no
+ // NOP, as -- in contrast to ECClassicalOp -- there is no
// transaction involved
}
if (extent_cache.idle()) {
if (op->version > get_parent()->get_log().get_can_rollback_to()) {
- const int transactions_since_last_idle = extent_cache.
- get_and_reset_counter();
- dout(20) << __func__ << " version=" << op->version << " ec_counter=" <<
- transactions_since_last_idle << dendl;
+ dout(20) << __func__ << " cache idle " << op->version << dendl;
// submit a dummy, transaction-empty op to kick the rollforward
const auto tid = get_parent()->get_tid();
const auto nop = std::make_shared<ECDummyOp>();
} else {
os << ", noattrs";
}
- os << ", buffers_read=" << buffers_read << ")";
+ os << ", buffers_read=" << buffers_read;
+ os << ", processed_read_requests=" << processed_read_requests << ")";
}
};
op->object.request(op);
}
waiting_ops.insert(waiting_ops.end(), op_list.begin(), op_list.end());
- counter++;
cache_maybe_ready();
}
return active_ios == 0;
}
-uint32_t ECExtentCache::get_and_reset_counter() {
- uint32_t ret = counter;
- counter = 0;
- return ret;
-}
-
list<ECExtentCache::LRU::Key>::iterator ECExtentCache::LRU::erase(
const list<Key>::iterator &it,
bool do_update_mempool) {
const ECUtil::stripe_info_t &sinfo;
std::list<OpRef> waiting_ops;
void cache_maybe_ready();
- uint32_t counter = 0;
uint32_t active_ios = 0;
CephContext *cct;
void execute(std::list<OpRef> &op_list);
[[nodiscard]] bool idle() const;
- uint32_t get_and_reset_counter();
void add_on_write(std::function<void(void)> &&cb) const {
if (waiting_ops.empty()) {
using ceph::ErasureCodeInterfaceRef;
void debug(const hobject_t &oid, const std::string &str,
- const ECUtil::shard_extent_map_t &map, DoutPrefixProvider *dpp
- ) {
-#if DEBUG_EC_BUFFERS
+ const ECUtil::shard_extent_map_t &map, DoutPrefixProvider *dpp) {
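+ // Level 20 logs a summary of the shard map; the full buffer dump goes to level 30.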
ldpp_dout(dpp, 20)
- << "EC_DEBUG_BUFFERS: generate_transactions: "
- << "oid: " << oid
- << " " << str << " " << map.debug_string(2048, 8) << dendl;
-#else
- ldpp_dout(dpp, 20)
- << "generate_transactions: "
- << "oid: " << oid
- << str << map << dendl;
-#endif
+ << " generate_transactions: " << "oid: " << oid << str << map << dendl;
+ ldpp_dout(dpp, 30)
+ << "EC_DEBUG_BUFFERS: " << map.debug_string(2048, 8) << dendl;
}
void ECTransaction::Generate::encode_and_write() {
}
uint64_t new_shard_size = eset.range_end();
- if (new_shard_size == old_shard_size) continue;
+ if (new_shard_size == old_shard_size) {
+ continue;
+ }
uint64_t write_end = 0;
if (plan.will_write.contains(shard)) {
write_end = plan.will_write.at(shard).range_end();
}
- if (write_end == new_shard_size) continue;
+ if (write_end == new_shard_size) {
+ continue;
+ }
/* If code is executing here, it means that the written part of the
* shard does not reflect the size that EC believes the shard to be.
for (shard_id_t shard; shard < sinfo.get_k_plus_m(); ++shard) {
if (sinfo.is_nonprimary_shard(shard)) {
if (entry->is_written_shard(shard) || plan.orig_size != plan.
- projected_size) {
- // Written - erase per shard version
- if (oi.shard_versions.erase(shard)) {
- update = true;
- }
- } else if (!oi.shard_versions.count(shard)) {
- // Unwritten shard, previously up to date
- oi.shard_versions[shard] = oi.prior_version;
+ projected_size) {
+ // Written - erase per shard version
+ if (oi.shard_versions.erase(shard)) {
update = true;
- } else {
- // Unwritten shard, already out of date
}
+ } else if (!oi.shard_versions.count(shard)) {
+ // Unwritten shard, previously up to date
+ oi.shard_versions[shard] = oi.prior_version;
+ update = true;
+ } else {
+ // Unwritten shard, already out of date
+ }
} else {
// Primary shards are always written and use oi.version
}
// Update cached OI
obc->obs.oi.shard_versions = oi.shard_versions;
}
- ldpp_dout(dpp, 20) << __func__ << "shard_info: version=" << entry->version
+ ldpp_dout(dpp, 20) << __func__ << " shard_info: oid=" << oid
+ << " version=" << entry->version
<< " present=" << entry->present_shards
<< " written=" << entry->written_shards
<< " shard_versions=" << oi.shard_versions << dendl;
const unsigned pdw_write_mode);
void print(std::ostream &os) const {
- os << "to_read: " << to_read
+ os << "{hoid: " << hoid
+ << " to_read: " << to_read
<< " will_write: " << will_write
<< " hinfo: " << hinfo
<< " shinfo: " << shinfo
<< " orig_size: " << orig_size
<< " projected_size: " << projected_size
<< " invalidates_cache: " << invalidates_cache
- << " do_pdw: " << do_parity_delta_write;
+ << " do_pdw: " << do_parity_delta_write
+ << "}";
}
};
std::list<WritePlanObj> plans;
void print(std::ostream &os) const {
- os << " { plans : ";
+ os << " plans: [";
bool first = true;
for (auto && p : plans) {
if (first) {
}
os << p;
}
- os << "}";
+ os << "]";
}
};
}
};
-// Setting to 1 turns on very large amounts of level 0 debug containing the
-// contents of buffers. Even on level 20 this is not really wanted.
-#define DEBUG_EC_BUFFERS 1
-
namespace ECUtil {
class shard_extent_map_t;
/** return the sum of extent_set.size */
uint64_t size() const {
uint64_t size = 0;
- for (auto &&[_, e] : map) size += e.size();
+ for (auto &&[_, e] : map) {
+ size += e.size();
+ }
return size;
}
tinfo.partial_writes_last_complete = info.partial_writes_last_complete;
if (!tinfo.partial_writes_last_complete.empty()) {
psdout(20) << "sending info to " << from
- << " pwcl=" << tinfo.partial_writes_last_complete
+ << " pwlc=" << tinfo.partial_writes_last_complete
<< " info=" << tinfo
<< dendl;
}