return 0;
}
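+// Render the bytes of a bufferlist as a lowercase hex string; used for
+// debug-level scrub logging of the data read from a shard.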
+std::string ECBackend::extract_hex_from_bufferlist(
+ const bufferlist& bl)
+{
+ std::string hex_string;
+ const size_t length = bl.length();
+ for (size_t i = 0; i < length; i++) {
+ hex_string += fmt::format("{:02x}", static_cast<unsigned char>(bl[i]));
+ }
+ return hex_string;
+}
+
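+// Render a uint32_t CRC held in a bufferlist as a hex string, reading its
+// bytes in reverse order.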
+std::string ECBackend::extract_crc_from_bufferlist(
+ const bufferlist& crc_buffer) {
+ std::string crc_string;
+ constexpr size_t digest_length = sizeof(uint32_t);
+ for (size_t i = 0; i < digest_length; i++) {
+ crc_string += fmt::format(
+     "{:02x}", static_cast<unsigned char>(crc_buffer[digest_length - 1 - i]));
+ }
+ return crc_string;
+}
+
int ECBackend::be_deep_scrub(
const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
return 0;
}
if (r > 0) {
+ dout(20) << fmt::format(
+     "{}: Hoid {}, Shard {} ({}~{}) - Read data: {}",
+     __func__, poid, get_parent()->whoami_shard(), pos.data_pos, r,
+     extract_hex_from_bufferlist(bl)) << dendl;
pos.data_hash << bl;
}
perf_logger.inc(io_counters.read_bytes, r);
// We pass the calculated digest here
// This will be used along with the plugin to verify data consistency
o.digest = pos.data_hash.digest();
+ dout(20) << fmt::format(
+     "{}: Hoid {}, Shard {} - Digest: 0x{:x}",
+     __func__, poid, get_parent()->whoami_shard(), o.digest) << dendl;
}
else
{
m_is_optimized_ec = m_pool.info.allows_ecoptimizations();
// EC-related:
- if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()) {
+ if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode() && m_depth == scrub_level_t::deep) {
m_ec_digest_map_size = m_pg.get_ec_sinfo().get_k_plus_m();
}
this_chunk->m_ec_digest_map.clear();
+ dout(20) << __func__ << ": Hoid: " << ho << ", Beginning EC digest map setup" << dendl;
+
if (auth_selection.auth_oi.version != eversion_t() &&
- m_pg.get_ec_supports_crc_encode_decode()) {
+ m_pg.get_ec_supports_crc_encode_decode() &&
+ m_depth == scrub_level_t::deep) {
uint64_t auth_length = this_chunk->received_maps[auth_selection.auth_shard]
.objects.at(ho)
.size;
+ dout(20) << __func__ << ": Hoid: " << ho
+     << ", crc encode/decode supported. Auth length: " << auth_length << dendl;
+
shard_id_set available_shards;
for (const auto& [srd, smap] : this_chunk->received_maps) {
if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode() &&
- smap.objects.contains(ho)) {
+ m_depth == scrub_level_t::deep && smap.objects.contains(ho)) {
uint64_t shard_length = smap.objects.at(ho).size;
+ dout(20) << __func__ << ": Hoid: " << ho << ", Auth length: " << auth_length
+     << ". Checking shard " << srd << ". Shard length: " << shard_length
+     << dendl;
+
available_shards.insert(srd.shard);
uint32_t digest = smap.objects.at(ho).digest;
// require them to be the same length.
ceph_assert(auth_length >= shard_length);
int padding = auth_length - shard_length;
+ dout(20) << __func__ << ": Padding length: " << padding << dendl;
if (padding != 0) {
+ dout(20) << fmt::format(
+     "{}: Hoid: {}, shard: {}, (A) Padding digest with {} zeros, going from {} to {}",
+     __func__, ho, srd, padding, digest,
+     ceph_crc32c_zeros(digest, padding)) << dendl;
digest = ceph_crc32c_zeros(digest, padding);
}
-1, logical_to_ondisk_size(auth_selection.auth_oi.size,
auth_selection.auth_shard.shard));
+ dout(20) << fmt::format(
+     "{}: Hoid {} (size {}) - Zero_data_crc: 0x{:x}", __func__, ho,
+     logical_to_ondisk_size(auth_selection.auth_oi.size,
+                            auth_selection.auth_shard.shard),
+     zero_data_crc) << dendl;
+
for (const auto& shard_id : m_pg.get_ec_sinfo().get_data_shards()) {
for (std::size_t i = 0; i < sizeof(zero_data_crc); i++) {
this_chunk->m_ec_digest_map.at(shard_id).c_str()[i] =
clog.error() << candidates_errors.str();
}
+ dout(20) << __func__ << ": About to set up EC digest map" << dendl;
+
if (!m_is_replicated) {
+ dout(20) << __func__ << ": Not replicated" << dendl;
setup_ec_digest_map(auth_res, ho);
}
std::list<pg_shard_t> auth_list; // out "param" to
std::set<pg_shard_t> object_errors; // be returned
std::size_t digest_size = 0;
- if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()) {
+ if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode() &&
+ m_depth == scrub_level_t::deep) {
digest_size = m_pg.get_ec_sinfo().get_k_plus_m();
}
shard_id_map<bufferlist> digests{digest_size};
ho.has_snapset(),
srd);
- if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()) {
+ if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()
+ && m_depth == scrub_level_t::deep) {
// Create a map containing all data shards except the current shard, plus
// all parity shards. Decode the current data shard, and add it to
// set<shard_id> incorrectly_decoded_shards if the shard did not decode
ceph_assert(auth_length >= shard_length);
int padding = auth_length - shard_length;
if (padding != 0) {
+ dout(20) << fmt::format(
+     "{}: Hoid: {}, shard: {}, (B) Padding digest with {} zeros, going from {} to {}",
+     __func__, ho, srd, padding, digest,
+     ceph_crc32c_zeros(digest, padding)) << dendl;
digest = ceph_crc32c_zeros(digest, padding);
}
<< dendl;
}
- if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()) {
+ if (!m_is_replicated && m_pg.get_ec_supports_crc_encode_decode()
+ && m_depth == scrub_level_t::deep) {
set<shard_id_t> incorrectly_decoded_shards;
shard_id_set shards;
-1, logical_to_ondisk_size(auth_sel.auth_oi.size,
auth_sel.auth_shard.shard));
+ dout(20) << fmt::format(
+     "{}: Hoid {} (size {}) - Zero_data_crc: 0x{:x}", __func__, ho,
+     logical_to_ondisk_size(auth_sel.auth_oi.size, auth_sel.auth_shard.shard),
+     zero_data_crc) << dendl;
+
for (uint32_t i = 0; i < sizeof(zero_data_crc); i++) {
bl.c_str()[i] ^= retrieve_byte(zero_data_crc, i);
}