o->c->store->cct->_conf->bluestore_extent_map_inline_shard_prealloc_size) {
}
+// Clone the logical extent range [srcoff, srcoff+length) of oldo into newo
+// at dstoff by sharing the underlying blobs rather than copying data.
+// Blobs that are not yet shared are converted to shared blobs first, and the
+// affected source range is marked dirty so that conversion is persisted.
+// NOTE(review): member of ExtentMap — uses this->onode below; presumably
+// invoked on oldo's extent map (see the call site in the clone path) — confirm.
+void BlueStore::ExtentMap::dup(BlueStore* b, TransContext* txc,
+ CollectionRef& c, OnodeRef& oldo, OnodeRef& newo, uint64_t& srcoff,
+ uint64_t& length, uint64_t& dstoff) {
+
+ // cct is required by the dout() logging macros below.
+ auto cct = onode->c->store->cct;
+ // id_to_blob maps a source blob (via its last_encoded_id) to its duplicate,
+ // so a blob referenced by several extents is duped exactly once.
+ vector<BlobRef> id_to_blob(oldo->extent_map.extent_map.size());
+ for (auto& e : oldo->extent_map.extent_map) {
+ // -1 marks "not yet duplicated".
+ e.blob->last_encoded_id = -1;
+ }
+
+ int n = 0;
+ uint64_t end = srcoff + length;
+ // Source range that must be re-persisted because blobs in it were
+ // converted from private to shared (only meaningful when src_dirty).
+ uint32_t dirty_range_begin = 0;
+ uint32_t dirty_range_end = 0;
+ bool src_dirty = false;
+ // Walk source extents overlapping [srcoff, end).
+ for (auto ep = oldo->extent_map.seek_lextent(srcoff);
+ ep != oldo->extent_map.extent_map.end();
+ ++ep) {
+ auto& e = *ep;
+ if (e.logical_offset >= end) {
+ break;
+ }
+ dout(20) << __func__ << " src " << e << dendl;
+ BlobRef cb;
+ bool blob_duped = true;
+ if (e.blob->last_encoded_id >= 0) {
+ // Blob already duplicated earlier in this loop; reuse the copy.
+ cb = id_to_blob[e.blob->last_encoded_id];
+ blob_duped = false;
+ } else {
+ // dup the blob
+ const bluestore_blob_t& blob = e.blob->get_blob();
+ // make sure it is shared
+ if (!blob.is_shared()) {
+ // Converting to shared mutates the source blob, so remember the
+ // logical range of oldo that has to be rewritten.
+ c->make_blob_shared(b->_assign_blobid(txc), e.blob);
+ if (!src_dirty) {
+ src_dirty = true;
+ dirty_range_begin = e.logical_offset;
+ }
+ assert(e.logical_end() > 0);
+ // -1 to exclude next potential shard
+ dirty_range_end = e.logical_end() - 1;
+ } else {
+ // Already shared: make sure its ref map is loaded before we add refs.
+ c->load_shared_blob(e.blob->shared_blob);
+ }
+ cb = new Blob();
+ e.blob->last_encoded_id = n;
+ id_to_blob[n] = cb;
+ e.blob->dup(*cb);
+ // bump the extent refs on the copied blob's extents
+ for (auto p : blob.get_extents()) {
+ if (p.is_valid()) {
+ e.blob->shared_blob->get_ref(p.offset, p.length);
+ }
+ }
+ txc->write_shared_blob(e.blob->shared_blob);
+ dout(20) << __func__ << " new " << *cb << dendl;
+ }
+
+ // Clip the source extent to the requested [srcoff, end) window.
+ int skip_front, skip_back;
+ if (e.logical_offset < srcoff) {
+ skip_front = srcoff - e.logical_offset;
+ } else {
+ skip_front = 0;
+ }
+ if (e.logical_end() > end) {
+ skip_back = e.logical_end() - end;
+ } else {
+ skip_back = 0;
+ }
+
+ // Insert the clipped extent into newo, translated by (dstoff - srcoff).
+ Extent* ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
+ e.blob_offset + skip_front, e.length - skip_front - skip_back, cb);
+ newo->extent_map.extent_map.insert(*ne);
+ ne->blob->get_ref(c.get(), ne->blob_offset, ne->length);
+ // fixme: we may leave parts of new blob unreferenced that could
+ // be freed (relative to the shared_blob).
+ txc->statfs_delta.stored() += ne->length;
+ if (e.blob->get_blob().is_compressed()) {
+ txc->statfs_delta.compressed_original() += ne->length;
+ if (blob_duped) {
+ // Count the compressed payload only once per duplicated blob.
+ txc->statfs_delta.compressed() +=
+ cb->get_blob().get_compressed_payload_length();
+ }
+ }
+ dout(20) << __func__ << " dst " << *ne << dendl;
+ ++n;
+ }
+ if (src_dirty) {
+ // Persist the source extents whose blobs became shared above.
+ oldo->extent_map.dirty_range(dirty_range_begin,
+ dirty_range_end - dirty_range_begin);
+ txc->write_onode(oldo);
+ }
+ txc->write_onode(newo);
+
+ // Extend the destination object if the clone wrote past its current size.
+ if (dstoff + length > newo->onode.size) {
+ newo->onode.size = dstoff + length;
+ }
+ newo->extent_map.dirty_range(dstoff, length);
+}
void BlueStore::ExtentMap::update(KeyValueDB::Transaction t,
bool force)
{
_dump_onode(oldo);
_dump_onode(newo);
- // hmm, this could go into an ExtentMap::dup() method.
- vector<BlobRef> id_to_blob(oldo->extent_map.extent_map.size());
- for (auto &e : oldo->extent_map.extent_map) {
- e.blob->last_encoded_id = -1;
- }
- int n = 0;
- uint64_t end = srcoff + length;
- uint32_t dirty_range_begin = 0;
- uint32_t dirty_range_end = 0;
- bool src_dirty = false;
- for (auto ep = oldo->extent_map.seek_lextent(srcoff);
- ep != oldo->extent_map.extent_map.end();
- ++ep) {
- auto& e = *ep;
- if (e.logical_offset >= end) {
- break;
- }
- dout(20) << __func__ << " src " << e << dendl;
- BlobRef cb;
- bool blob_duped = true;
- if (e.blob->last_encoded_id >= 0) {
- // blob is already duped
- cb = id_to_blob[e.blob->last_encoded_id];
- blob_duped = false;
- } else {
- // dup the blob
- const bluestore_blob_t& blob = e.blob->get_blob();
- // make sure it is shared
- if (!blob.is_shared()) {
- c->make_blob_shared(_assign_blobid(txc), e.blob);
- if (!src_dirty) {
- src_dirty = true;
- dirty_range_begin = e.logical_offset;
- }
- assert(e.logical_end() > 0);
- // -1 to exclude next potential shard
- dirty_range_end = e.logical_end() - 1;
- } else {
- c->load_shared_blob(e.blob->shared_blob);
- }
- cb = new Blob();
- e.blob->last_encoded_id = n;
- id_to_blob[n] = cb;
- e.blob->dup(*cb);
- // bump the extent refs on the copied blob's extents
- for (auto p : blob.get_extents()) {
- if (p.is_valid()) {
- e.blob->shared_blob->get_ref(p.offset, p.length);
- }
- }
- txc->write_shared_blob(e.blob->shared_blob);
- dout(20) << __func__ << " new " << *cb << dendl;
- }
- // dup extent
- int skip_front, skip_back;
- if (e.logical_offset < srcoff) {
- skip_front = srcoff - e.logical_offset;
- } else {
- skip_front = 0;
- }
- if (e.logical_end() > end) {
- skip_back = e.logical_end() - end;
- } else {
- skip_back = 0;
- }
- Extent *ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
- e.blob_offset + skip_front,
- e.length - skip_front - skip_back, cb);
- newo->extent_map.extent_map.insert(*ne);
- ne->blob->get_ref(c.get(), ne->blob_offset, ne->length);
- // fixme: we may leave parts of new blob unreferenced that could
- // be freed (relative to the shared_blob).
- txc->statfs_delta.stored() += ne->length;
- if (e.blob->get_blob().is_compressed()) {
- txc->statfs_delta.compressed_original() += ne->length;
- if (blob_duped){
- txc->statfs_delta.compressed() +=
- cb->get_blob().get_compressed_payload_length();
- }
- }
- dout(20) << __func__ << " dst " << *ne << dendl;
- ++n;
- }
- if (src_dirty) {
- oldo->extent_map.dirty_range(dirty_range_begin,
- dirty_range_end - dirty_range_begin);
- txc->write_onode(oldo);
- }
- txc->write_onode(newo);
+ oldo->extent_map.dup(this, txc, c, oldo, newo, srcoff, length, dstoff);
- if (dstoff + length > newo->onode.size) {
- newo->onode.size = dstoff + length;
- }
- newo->extent_map.dirty_range(dstoff, length);
_dump_onode(oldo);
_dump_onode(newo);
return 0;