}
int ECBackend::be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
if (stride % sinfo.get_chunk_size())
stride += sinfo.get_chunk_size() - (stride % sinfo.get_chunk_size());
+ auto& perf_logger = *(get_parent()->get_logger());
+ perf_logger.inc(io_counters.read_cnt);
bufferlist bl;
r = switcher->store->read(
switcher->ch,
if (r > 0) {
pos.data_hash << bl;
}
+ perf_logger.inc(io_counters.read_bytes, r);
pos.data_pos += r;
if (r == (int)stride) {
return -EINPROGRESS;
bool auto_repair_supported() const { return true; }
int be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
}
int ECBackendL::be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
if (stride % sinfo.get_chunk_size())
stride += sinfo.get_chunk_size() - (stride % sinfo.get_chunk_size());
+ auto& perf_logger = *(get_parent()->get_logger());
+ perf_logger.inc(io_counters.read_cnt);
bufferlist bl;
r = switcher->store->read(
switcher->ch,
if (r > 0) {
pos.data_hash << bl;
}
+ perf_logger.inc(io_counters.read_bytes, r);
pos.data_pos += r;
if (r == (int)stride) {
return -EINPROGRESS;
bool auto_repair_supported() const { return true; }
int be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
return legacy.be_get_ondisk_size(logical_size);
}
- int be_deep_scrub(const hobject_t &oid, ScrubMap &map, ScrubMapBuilder &pos
- , ScrubMap::object &o)
+ int be_deep_scrub(
+ const Scrub::ScrubCounterSet &io_counters,
+ const hobject_t &oid, ScrubMap &map, ScrubMapBuilder &pos,
+ ScrubMap::object &o) override
{
if (is_optimized()) {
- return optimized.be_deep_scrub(oid, map, pos, o);
+ return optimized.be_deep_scrub(io_counters, oid, map, pos, o);
}
- return legacy.be_deep_scrub(oid, map, pos, o);
+ return legacy.be_deep_scrub(io_counters, oid, map, pos, o);
}
unsigned get_ec_data_chunk_count() const override
}
int PGBackend::be_scan_list(
+ const Scrub::ScrubCounterSet& io_counters,
ScrubMap &map,
ScrubMapBuilder &pos)
{
ceph_assert(!pos.done());
ceph_assert(pos.pos < pos.ls.size());
hobject_t& poid = pos.ls[pos.pos];
+ auto& perf_logger = *(get_parent()->get_logger());
int r = 0;
ScrubMap::object &o = map.objects[poid];
if (!pos.metadata_done) {
+ perf_logger.inc(io_counters.stats_cnt);
struct stat st;
r = store->stat(
ch,
true);
if (r == 0) {
+ perf_logger.inc(io_counters.getattr_cnt);
o.size = st.st_size;
ceph_assert(!o.negative);
r = store->getattrs(
}
if (pos.deep) {
- r = be_deep_scrub(poid, map, pos, o);
+ r = be_deep_scrub(io_counters, poid, map, pos, o);
if (r == -EINPROGRESS) {
return -EINPROGRESS;
} else if (r != 0) {
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
+#include "osd/scrubber_common.h"
#include "common/LogClient.h"
#include <string>
#include "PGTransaction.h"
Context *on_complete, bool fast_read = false) = 0;
virtual bool auto_repair_supported() const = 0;
+
int be_scan_list(
+ const Scrub::ScrubCounterSet& io_counters,
ScrubMap &map,
ScrubMapBuilder &pos);
shard_id_t shard_id) const = 0;
virtual int be_deep_scrub(
+ [[maybe_unused]] const Scrub::ScrubCounterSet& io_counters,
const hobject_t &oid,
ScrubMap &map,
ScrubMapBuilder &pos,
pg_committed_to,
true,
op_t);
-
+
op_t.register_on_commit(
parent->bless_context(
new C_OSD_OnOpCommit(this, &op)));
}
int ReplicatedBackend::be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o)
{
dout(10) << __func__ << " " << poid << " pos " << pos << dendl;
- int r;
- uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
- CEPH_OSD_OP_FLAG_BYPASS_CLEAN_CACHE;
+ auto& perf_logger = *(get_parent()->get_logger());
+ const uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
+ CEPH_OSD_OP_FLAG_BYPASS_CLEAN_CACHE;
utime_t sleeptime;
sleeptime.set_from_double(cct->_conf->osd_debug_deep_scrub_sleep);
sleeptime.sleep();
}
+ int r{0};
ceph_assert(poid == pos.ls[pos.pos]);
if (!pos.data_done()) {
if (pos.data_pos == 0) {
const uint64_t stride = cct->_conf->osd_deep_scrub_stride;
+ perf_logger.inc(io_counters.read_cnt);
bufferlist bl;
r = store->read(
ch,
if (r > 0) {
pos.data_hash << bl;
}
+ perf_logger.inc(io_counters.read_bytes, r);
pos.data_pos += r;
if (static_cast<uint64_t>(r) == stride) {
dout(20) << __func__ << " " << poid << " more data, digest so far 0x"
if (pos.omap_pos.empty()) {
pos.omap_hash = -1;
+ perf_logger.inc(io_counters.omapgetheader_cnt);
bufferlist hdrbl;
r = store->omap_get_header(
ch,
bool encoded = false;
dout(25) << "CRC header " << cleanbin(hdrbl, encoded, true) << dendl;
pos.omap_hash = hdrbl.crc32c(pos.omap_hash);
+ perf_logger.inc(io_counters.omapgetheader_bytes, hdrbl.length());
}
}
// omap
+
+ perf_logger.inc(io_counters.omapget_cnt);
using omap_iter_seek_t = ObjectStore::omap_iter_seek_t;
auto result = store->omap_iterate(
ch,
return -EINPROGRESS;
}
+ // we have the full omap now; finalize the omap-read perf counters
+ perf_logger.inc(io_counters.omapget_bytes, pos.omap_bytes);
+
if (pos.omap_keys > cct->_conf->
osd_deep_scrub_large_omap_object_key_threshold ||
pos.omap_bytes > cct->_conf->
int be_deep_scrub(
+ const Scrub::ScrubCounterSet& io_counters,
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
// scan objects
while (!pos.done()) {
-
- int r = m_pg->get_pgbackend()->be_scan_list(map, pos);
+ int r =
+ m_pg->get_pgbackend()->be_scan_list(get_unlabeled_counters(), map, pos);
dout(30) << __func__ << " BE returned " << r << dendl;
if (r == -EINPROGRESS) {
dout(20) << __func__ << " in progress" << dendl;