}
// checksum
- int64_t csum = csum_type.load();
- csum = select_option(
- "csum_type",
- csum,
- [&]() {
- int64_t val;
- if (coll->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
- return std::optional<int64_t>(val);
- }
- return std::optional<int64_t>();
- }
- );
+ int64_t csum = wctx->csum_type;
// compress (as needed) and calc needed space
uint64_t need = 0;
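For context, `select_option` is the helper used on both sides of this patch: it takes an option name, the global config value, and a callable that may yield a per-pool override, and the override wins when present. Below is a minimal sketch of that shape; it is not part of the patch, and the body is an assumption since the excerpt does not show the real helper.

#include <optional>
#include <string>

// Sketch (assumed shape): prefer the per-pool override produced by the
// callable, otherwise fall back to the global/config value.
template <typename T, typename F>
static T select_option(const std::string& opt_name, T global_val, F f) {
  (void)opt_name;                   // the real helper may use this for logging
  std::optional<T> pool_val = f();  // per-pool override, if any
  return pool_val ? *pool_val : global_val;
}
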
// apply basic csum block size
wctx->csum_order = block_size_order;
+ // checksum
+ int64_t csum = csum_type.load();
+ csum = select_option(
+ "csum_type",
+ csum,
+ [&]() {
+ int64_t val;
+ if (c->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
+ return std::optional<int64_t>(val);
+ }
+ return std::optional<int64_t>();
+ }
+ );
+ wctx->csum_type = csum;
+
// compression parameters
unsigned alloc_hints = o->onode.alloc_hint_flags;
auto cm = select_option(
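Net effect of the two hunks above: the checksum type is now resolved once per write in `_choose_write_options()` (global `csum_type` value, then the pool's CSUM_TYPE override) and cached on the WriteContext, and `_do_alloc_write()` reads the cached value instead of repeating the config/pool lookup. A condensed sketch of that flow, with hypothetical `*_sketch` names standing in for the real functions:

#include <cstdint>
#include <optional>

struct WriteContextSketch {
  uint8_t csum_type = 0;  // mirrors the new WriteContext::csum_type field
};

void choose_write_options_sketch(int64_t global_csum,
                                 std::optional<int64_t> pool_csum,
                                 WriteContextSketch* wctx) {
  int64_t csum = pool_csum ? *pool_csum : global_csum;
  // Narrowing to uint8_t is safe here: checksum-type values are small
  // enumerators (CSUM_NONE, CSUM_CRC32C, ...) that fit in one byte.
  wctx->csum_type = static_cast<uint8_t>(csum);
}

void do_alloc_write_sketch(const WriteContextSketch* wctx) {
  int64_t csum = wctx->csum_type;  // plain field read; no lookup per call
  (void)csum;  // ...then used when setting up checksums for each new blob
}
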
struct WriteContext {
bool buffered = false; ///< buffered write
bool compress = false; ///< compressed write
- uint64_t target_blob_size = 0; ///< target (max) blob size
+ uint8_t csum_type = 0; ///< checksum type for new blobs
unsigned csum_order = 0; ///< target checksum chunk order
+ uint64_t target_blob_size = 0; ///< target (max) blob size
old_extent_map_t old_extents; ///< must deref these blobs
interval_set<uint64_t> extents_to_gc; ///< extents for garbage collection
buffered = other.buffered;
compress = other.compress;
target_blob_size = other.target_blob_size;
+ csum_type = other.csum_type;
csum_order = other.csum_order;
}
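The assignments above belong to WriteContext's hand-written copy helper (`fork()` in BlueStore.h), so a field added to the struct must also be copied here; otherwise a forked context would silently revert to `csum_type == 0` (no checksum) for new blobs. A small self-contained illustration, with a hypothetical `CtxSketch` standing in for WriteContext:

#include <cassert>
#include <cstdint>

struct CtxSketch {                  // stand-in for WriteContext
  uint8_t csum_type = 0;
  unsigned csum_order = 0;
  void fork(const CtxSketch& other) {
    csum_type = other.csum_type;    // the assignment this hunk adds
    csum_order = other.csum_order;
  }
};

int main() {
  CtxSketch a;
  a.csum_type = 3;                  // arbitrary nonzero type, for illustration
  CtxSketch b;
  b.fork(a);
  assert(b.csum_type == a.csum_type);  // fails if the copy is forgotten
}
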
void write(