} else {
for (auto reg : b2r_it->second) {
// determine how much of the blob to read
- uint64_t chunk_size = bptr->blob.get_chunk_size(block_size);
+ uint64_t chunk_size = bptr->blob.get_chunk_size(csum_type != bluestore_blob_t::CSUM_NONE, block_size);
uint64_t r_off = reg.blob_xoffset;
uint64_t r_len = reg.length;
unsigned front = r_off % chunk_size;
const bufferlist& bl) const
{
int bad;
- int r = blob->verify_csum(blob_xoffset, bl, &bad);
+ int r = csum_type != bluestore_blob_t::CSUM_NONE ? blob->verify_csum(blob_xoffset, bl, &bad) :0;
if (r < 0) {
if (r == -1) {
vector<bluestore_pextent_t> pex;
<< " bstart 0x" << std::hex << bstart << std::dec << dendl;
// can we pad our head/tail out with zeros?
- uint64_t chunk_size = b->blob.get_chunk_size(block_size);
+ uint64_t chunk_size = b->blob.get_chunk_size(b->blob.has_csum(), block_size); // honor the blob's own csum state, not the current global csum config: existing per-blob checksums still dictate the read/pad granularity
uint64_t head_pad = P2PHASE(offset, chunk_size);
if (head_pad && o->onode.has_any_lextents(offset - head_pad, chunk_size)) {
head_pad = 0;
}
/// return chunk (i.e. min readable block) size for the blob
- uint64_t get_chunk_size(uint64_t dev_block_size) {
- return has_csum() ? MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size;
+ uint64_t get_chunk_size(bool csum_enabled, uint64_t dev_block_size) {
+ return csum_enabled && has_csum() ? MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size;
}
uint32_t get_csum_chunk_size() const {
return 1 << csum_chunk_order;