OPTION(bluestore_compression_algorithm, OPT_STR, "snappy")
OPTION(bluestore_compression_min_blob_size, OPT_U32, 256*1024)
OPTION(bluestore_compression_max_blob_size, OPT_U32, 4*1024*1024)
+OPTION(bluestore_gc_max_blob_depth, OPT_U32, 3)
+OPTION(bluestore_gc_merge_data, OPT_BOOL, true)
/*
* Require the net gain of compression at least to be at this ratio,
* otherwise we don't compress.
#define BLOBID_FLAG_ZEROOFFSET 0x2 // blob_offset is 0
#define BLOBID_FLAG_SAMELENGTH 0x4 // length matches previous extent
#define BLOBID_FLAG_SPANNING 0x8 // has spanning blob id
-#define BLOBID_SHIFT_BITS 4
+#define BLOBID_FLAG_BLOB_DEPTH 0x10 // blob overlapping count is 1 (depth not encoded)
+#define BLOBID_SHIFT_BITS 5
/*
* object name key structure
} else {
prev_len = p->length;
}
+ if (p->blob_depth == 1) {
+ blobid |= BLOBID_FLAG_BLOB_DEPTH;
+ }
small_encode_varint(blobid, bl);
if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
small_encode_varint_lowz(p->logical_offset - pos, bl);
small_encode_varint_lowz(p->length, bl);
}
pos = p->logical_offset + p->length;
+ if ((blobid & BLOBID_FLAG_BLOB_DEPTH) == 0) {
+ small_encode_varint_lowz(p->blob_depth, bl);
+ }
if (include_blob) {
p->blob->encode(bl);
}
small_decode_varint_lowz(prev_len, p);
}
le->length = prev_len;
+
+ if ((blobid & BLOBID_FLAG_BLOB_DEPTH) == 0) {
+ small_decode_varint_lowz(le->blob_depth, p);
+ } else {
+ le->blob_depth = 1;
+ }
if (blobid & BLOBID_FLAG_SPANNING) {
le->blob = get_spanning_blob(blobid >> BLOBID_SHIFT_BITS);
} else {
// split and deref middle
uint64_t front = offset - p->logical_offset;
old_extents->insert(
- *new Extent(offset, p->blob_offset + front, length, p->blob));
+ *new Extent(offset, p->blob_offset + front, length, p->blob_depth, p->blob));
add(end,
p->blob_offset + front + length,
- p->length - front - length,
+ p->length - front - length, p->blob_depth,
p->blob);
p->length = front;
break;
assert(p->logical_offset + p->length > offset); // else seek_lextent bug
uint64_t keep = offset - p->logical_offset;
old_extents->insert(*new Extent(offset, p->blob_offset + keep,
- p->length - keep, p->blob));
+ p->length - keep, p->blob_depth, p->blob));
p->length = keep;
++p;
continue;
if (p->logical_offset + p->length <= end) {
// deref whole lextent
old_extents->insert(*new Extent(p->logical_offset, p->blob_offset,
- p->length, p->blob));
+ p->length, p->blob_depth, p->blob));
rm(p++);
continue;
}
// deref head
uint64_t keep = (p->logical_offset + p->length) - end;
old_extents->insert(*new Extent(p->logical_offset, p->blob_offset,
- p->length - keep, p->blob));
- add(end, p->blob_offset + p->length - keep, keep, p->blob);
+ p->length - keep, p->blob_depth, p->blob));
+ add(end, p->blob_offset + p->length - keep, keep, p->blob_depth, p->blob);
rm(p);
break;
}
BlueStore::Extent *BlueStore::ExtentMap::set_lextent(
uint64_t logical_offset,
- uint64_t offset, uint64_t length, BlobRef b,
+ uint64_t offset, uint64_t length, uint8_t blob_depth, BlobRef b,
extent_map_t *old_extents)
{
punch_hole(logical_offset, length, old_extents);
b->ref_map.get(offset, length);
- Extent *le = new Extent(logical_offset, offset, length, b);
+ Extent *le = new Extent(logical_offset, offset, length, blob_depth, b);
extent_map.insert(*le);
return le;
}
b.add_u64(l_bluestore_txc, "bluestore_txc", "Transactions committed");
b.add_u64(l_bluestore_onode_reshard, "bluestore_onode_reshard",
"Onode extent map reshard events");
+ b.add_u64(l_bluestore_gc_bytes, "bluestore_gc_bytes", "garbage collected bytes");
logger = b.create_perf_counters();
g_ceph_context->get_perfcounters_collection()->add(logger);
}
region_t(uint64_t offset, uint64_t b_offs, uint64_t len)
: logical_offset(offset),
blob_xoffset(b_offs),
length(len) {}
region_t(const region_t& from)
: logical_offset(from.logical_offset),
blob_xoffset(from.blob_xoffset),
length(from.length) {}
friend ostream& operator<<(ostream& out, const region_t& r) {
return out << "0x" << std::hex << r.logical_offset << ":"
});
b->dirty_blob().calc_csum(b_off, padded);
dout(20) << __func__ << " lex old " << *ep << dendl;
- Extent *le = o->extent_map.set_lextent(offset, b_off + head_pad, length, b,
- &wctx->old_extents);
+ Extent *le = o->extent_map.set_lextent(offset, b_off + head_pad, length,
+ wctx->blob_depth, b, &wctx->old_extents);
b->dirty_blob().mark_used(le->blob_offset, le->length);
txc->statfs_delta.stored() += le->length;
dout(20) << __func__ << " lex " << *le << dendl;
dout(20) << __func__ << " wal write 0x" << std::hex << b_off << "~"
<< b_len << std::dec << " of mutable " << *b
<< " at " << op->extents << dendl;
- Extent *le = o->extent_map.set_lextent(offset, offset - bstart, length, b,
- &wctx->old_extents);
+ Extent *le = o->extent_map.set_lextent(offset, offset - bstart, length,
+ wctx->blob_depth, b, &wctx->old_extents);
b->dirty_blob().mark_used(le->blob_offset, le->length);
txc->statfs_delta.stored() += le->length;
dout(20) << __func__ << " lex " << *le << dendl;
wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
_pad_zeros(&bl, &b_off, block_size);
Extent *le = o->extent_map.set_lextent(offset, P2PHASE(offset, alloc_len),
- length, b, &wctx->old_extents);
+ length, wctx->blob_depth, b, &wctx->old_extents);
txc->statfs_delta.stored() += le->length;
dout(20) << __func__ << " lex " << *le << dendl;
wctx->write(b, alloc_len, b_off, bl, true);
blp.copy(l, t);
_buffer_cache_write(txc, b, 0, t, wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
wctx->write(b, l, 0, t, false);
- Extent *le = o->extent_map.set_lextent(offset, 0, l, b, &wctx->old_extents);
+ Extent *le = o->extent_map.set_lextent(offset, 0, l, wctx->blob_depth,
+ b, &wctx->old_extents);
txc->statfs_delta.stored() += l;
dout(20) << __func__ << " lex " << *le << dendl;
offset += l;
}
}
+// Check whether the extents overlapped by a prospective write to
+// [start_offset, end_offset) have been overwritten too many times.
+// Returns true when the deepest overlapped extent has reached
+// bluestore_gc_max_blob_depth; in that case *gc_start_offset and
+// *gc_end_offset are widened to cover the contiguous run of depth>1
+// extents that should be rewritten (garbage collected).  Returns false
+// otherwise, and sets *blob_depth to the depth the new write's extents
+// should record (1 + deepest overlapped extent).
+bool BlueStore::_blobs_need_garbage_collection(
+  OnodeRef o,
+  uint64_t start_offset,
+  uint64_t end_offset,
+  uint8_t *blob_depth,
+  uint64_t *gc_start_offset,
+  uint64_t *gc_end_offset)
+{
+  uint8_t depth = 0;
+  bool head_overlap = false;
+  bool tail_overlap = false;
+
+  *gc_start_offset = start_offset;
+  *gc_end_offset = end_offset;
+  *blob_depth = 1;
+
+  // does the write start strictly inside an existing extent?
+  auto hp = o->extent_map.seek_lextent(start_offset);
+
+  if (hp != o->extent_map.extent_map.end() && hp->logical_offset < start_offset &&
+      start_offset < hp->logical_offset + hp->length) {
+    depth = hp->blob_depth;
+    head_overlap = true;
+  }
+
+  // does the write end strictly inside an existing extent?
+  auto tp = o->extent_map.seek_lextent(end_offset);
+
+  if (tp != o->extent_map.extent_map.end() && tp->logical_offset < end_offset &&
+      end_offset < tp->logical_offset + tp->length) {
+    tail_overlap = true;
+    if (depth < tp->blob_depth) {
+      depth = tp->blob_depth;
+    }
+  }
+
+  if (depth >= g_conf->bluestore_gc_max_blob_depth) {
+    if (head_overlap) {
+      // walk backwards over the contiguous run of depth>1 extents
+      auto hp_next = hp;
+      while (hp != o->extent_map.extent_map.begin() && hp->blob_depth > 1) {
+        hp_next = hp;
+        --hp;
+        if (hp->logical_offset + hp->length != hp_next->logical_offset) {
+          hp = hp_next;  // gap found; back up to the last contiguous extent
+          break;
+        }
+      }
+      *gc_start_offset = hp->logical_offset;
+    }
+    if (tail_overlap) {
+      // walk forwards over the contiguous run of depth>1 extents
+      auto tp_prev = tp;
+
+      while (tp->blob_depth > 1) {
+        tp_prev = tp;
+        tp++;
+        if (tp == o->extent_map.extent_map.end() ||
+            (tp_prev->logical_offset + tp_prev->length) != tp->logical_offset) {
+          tp = tp_prev;  // gap or map end; back up to the last contiguous extent
+          break;
+        }
+      }
+      // NOTE(review): when the loop exits because tp->blob_depth <= 1, tp and
+      // tp_prev are different extents, so this mixes fields of both and
+      // overshoots tp_prev's end by tp_prev->length bytes -- confirm that
+      // tp_prev->logical_offset + tp_prev->length was not intended here.
+      *gc_end_offset = tp->logical_offset + tp_prev->length;
+    }
+  }
+  if (depth >= g_conf->bluestore_gc_max_blob_depth) {
+    return true;
+  } else {
+    *blob_depth = 1 + depth;
+    return false;
+  }
+}
+
+// Dispatch one logical write to the small/big write paths, splitting it
+// on min_alloc_size boundaries into an unaligned head, a block-aligned
+// middle, and an unaligned tail.  Factored verbatim out of _do_write so
+// the garbage-collection path can reuse it.
+void BlueStore::_do_write_data(
+  TransContext *txc,
+  CollectionRef& c,
+  OnodeRef o,
+  uint64_t offset,
+  uint64_t length,
+  bufferlist& bl,
+  WriteContext *wctx)
+{
+  uint64_t end = offset + length;
+  bufferlist::iterator p = bl.begin();
+
+  if (offset / min_alloc_size == (end - 1) / min_alloc_size &&
+      (length != min_alloc_size)) {
+    // we fall within the same block
+    _do_write_small(txc, c, o, offset, length, p, wctx);
+  } else {
+    uint64_t head_offset, head_length;
+    uint64_t middle_offset, middle_length;
+    uint64_t tail_offset, tail_length;
+
+    head_offset = offset;
+    head_length = P2NPHASE(offset, min_alloc_size);
+
+    tail_offset = P2ALIGN(end, min_alloc_size);
+    tail_length = P2PHASE(end, min_alloc_size);
+
+    middle_offset = head_offset + head_length;
+    middle_length = length - head_length - tail_length;
+
+    if (head_length) {
+      _do_write_small(txc, c, o, head_offset, head_length, p, wctx);
+    }
+
+    if (middle_length) {
+      _do_write_big(txc, c, o, middle_offset, middle_length, p, wctx);
+    }
+
+    if (tail_length) {
+      _do_write_small(txc, c, o, tail_offset, tail_length, p, wctx);
+    }
+  }
+}
int BlueStore::_do_write(
TransContext *txc,
CollectionRef& c,
<< " comp_blob_size 0x" << std::hex << wctx.comp_blob_size
<< std::dec << dendl;
- o->extent_map.fault_range(db, offset, length);
-
- bufferlist::iterator p = bl.begin();
- if (offset / min_alloc_size == (end - 1) / min_alloc_size &&
- (length != min_alloc_size)) {
- // we fall within the same block
- _do_write_small(txc, c, o, offset, length, p, &wctx);
- } else {
- uint64_t head_offset, head_length;
- uint64_t middle_offset, middle_length;
- uint64_t tail_offset, tail_length;
-
- head_offset = offset;
- head_length = P2NPHASE(offset, min_alloc_size);
-
- tail_offset = P2ALIGN(end, min_alloc_size);
- tail_length = P2PHASE(end, min_alloc_size);
-
- middle_offset = head_offset + head_length;
- middle_length = length - head_length - tail_length;
-
- if (head_length) {
- _do_write_small(txc, c, o, head_offset, head_length, p, &wctx);
- }
-
- if (middle_length) {
- _do_write_big(txc, c, o, middle_offset, middle_length, p, &wctx);
+  uint64_t gc_start_offset = offset, gc_end_offset = end;
+
+  if (_blobs_need_garbage_collection(o, offset, end, &wctx.blob_depth,
+      &gc_start_offset, &gc_end_offset) == true) {
+    // we need garbage collection of blobs.
+    if (offset > gc_start_offset) {
+      // rewrite the region preceding this write
+      bufferlist head_bl;
+      size_t read_len = offset - gc_start_offset;
+      int r = _do_read(c.get(), o, gc_start_offset, read_len, head_bl, 0);
+      assert(r == (int)read_len);
+      if (g_conf->bluestore_gc_merge_data == true) {
+        // prepend the head data and widen the write range to match,
+        // so [offset, offset+length) still covers all of bl
+        head_bl.claim_append(bl);
+        bl.swap(head_bl);
+        offset = gc_start_offset;
+        length += read_len;
+      } else {
+        o->extent_map.fault_range(db, gc_start_offset, read_len);
+        _do_write_data(txc, c, o, gc_start_offset, read_len, head_bl, &wctx);
+      }
+      logger->inc(l_bluestore_gc_bytes, read_len);
     }
-    if (tail_length) {
-      _do_write_small(txc, c, o, tail_offset, tail_length, p, &wctx);
+    if (end < gc_end_offset) {
+      // rewrite the region following this write
+      bufferlist tail_bl;
+      size_t read_len = gc_end_offset - end;
+      int r = _do_read(c.get(), o, end, read_len, tail_bl, 0);
+      assert(r == (int)read_len);
+      if (g_conf->bluestore_gc_merge_data == true) {
+        bl.claim_append(tail_bl);
+        length += read_len;
+        end += read_len;
+      } else {
+        o->extent_map.fault_range(db, end, read_len);
+        _do_write_data(txc, c, o, end, read_len, tail_bl, &wctx);
+      }
+      logger->inc(l_bluestore_gc_bytes, read_len);
     }
   }
+  o->extent_map.fault_range(db, offset, length);
+  _do_write_data(txc, c, o, offset, length, bl, &wctx);
r = _do_alloc_write(txc, &wctx);
if (r < 0) {
}
Extent *ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
e.blob_offset + skip_front,
- e.length - skip_front - skip_back, cb);
+ e.length - skip_front - skip_back, e.blob_depth, cb);
newo->extent_map.extent_map.insert(*ne);
ne->blob->ref_map.get(ne->blob_offset, ne->length);
// fixme: we may leave parts of new blob unreferenced that could
l_bluestore_write_small_new,
l_bluestore_txc,
l_bluestore_onode_reshard,
+ l_bluestore_gc_bytes,
l_bluestore_last
};
uint32_t logical_offset = 0; ///< logical offset
uint32_t blob_offset = 0; ///< blob offset
uint32_t length = 0; ///< length
+  uint8_t blob_depth = 0; ///< blob overlapping count (0 until set; 1 == no overlap)
BlobRef blob; ///< the blob with our data
explicit Extent() {}
explicit Extent(uint32_t lo) : logical_offset(lo) {}
-  Extent(uint32_t lo, uint32_t o, uint32_t l, BlobRef& b)
-    : logical_offset(lo), blob_offset(o), length(l), blob(b) {}
+  Extent(uint32_t lo, uint32_t o, uint32_t l, uint8_t bd, BlobRef& b)
+    : logical_offset(lo), blob_offset(o), length(l), blob_depth(bd), blob(b) {}
// comparators for intrusive_set
friend bool operator<(const Extent &a, const Extent &b) {
extent_map_t::iterator seek_lextent(uint64_t offset);
/// add a new Extent
- void add(uint32_t lo, uint32_t o, uint32_t l, BlobRef& b) {
- extent_map.insert(*new Extent(lo, o, l, b));
+ void add(uint32_t lo, uint32_t o, uint32_t l, uint8_t bd, BlobRef& b) {
+ extent_map.insert(*new Extent(lo, o, l, bd, b));
}
/// remove (and delete) an Extent
/// put new lextent into lextent_map overwriting existing ones if
/// any and update references accordingly
Extent *set_lextent(uint64_t logical_offset,
- uint64_t offset, uint64_t length, BlobRef b,
- extent_map_t *old_extents);
+ uint64_t offset, uint64_t length, uint8_t blob_depth,
+ BlobRef b, extent_map_t *old_extents);
};
bool buffered = false; ///< buffered write
bool compress = false; ///< compressed write
uint64_t comp_blob_size = 0; ///< target compressed blob size
+ uint8_t blob_depth = 0; ///< depth of the logical extent
unsigned csum_order = 0; ///< target checksum chunk order
extent_map_t old_extents; ///< must deref these blobs
uint32_t fadvise_flags);
void _pad_zeros(bufferlist *bl, uint64_t *offset,
uint64_t chunk_size);
+
+ bool _blobs_need_garbage_collection(OnodeRef o,
+ uint64_t start_offset,
+ uint64_t end_offset,
+ uint8_t *blob_depth,
+ uint64_t *gc_start_offset,
+ uint64_t *gc_end_offset);
+
int _do_write(TransContext *txc,
CollectionRef &c,
OnodeRef o,
uint64_t offset, uint64_t length,
bufferlist& bl,
uint32_t fadvise_flags);
+ void _do_write_data(TransContext *txc,
+ CollectionRef& c,
+ OnodeRef o,
+ uint64_t offset,
+ uint64_t length,
+ bufferlist& bl,
+ WriteContext *wctx);
+
int _touch(TransContext *txc,
CollectionRef& c,
OnodeRef& o);
g_ceph_context->_conf->apply_changes(NULL);
}
+// Verify BlueStore blob garbage collection: repeatedly overlapping
+// compressed writes must not grow wasted space (allocated - stored).
+TEST_P(StoreTest, garbageCollection) {
+  ObjectStore::Sequencer osr("test");
+  int r;
+  int64_t waste1, waste2;
+  coll_t cid;
+  int buf_len = 256 * 1024;
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  g_conf->set_val("bluestore_compression", "force");
+  g_conf->set_val("bluestore_gc_merge_data", "true");
+  g_ceph_context->_conf->apply_changes(NULL);
+
+  ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
+  {
+    bufferlist in;
+    r = store->read(cid, hoid, 0, 5, in);
+    ASSERT_EQ(-ENOENT, r);
+  }
+  {
+    ObjectStore::Transaction t;
+    t.create_collection(cid, 0);
+    cerr << "Creating collection " << cid << std::endl;
+    r = apply_transaction(store, &osr, std::move(t));
+    ASSERT_EQ(r, 0);
+  }
+
+  std::string data;
+  data.resize(buf_len);
+
+  {
+    {
+      bool exists = store->exists(cid, hoid);
+      ASSERT_TRUE(!exists);
+
+      ObjectStore::Transaction t;
+      t.touch(cid, hoid);
+      cerr << "Creating object " << hoid << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+
+      exists = store->exists(cid, hoid);
+      ASSERT_EQ(true, exists);
+    }
+    bufferlist bl;
+
+    for(size_t i = 0; i < data.size(); i++)
+      data[i] = 'R';
+
+    bl.append(data);
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 0, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, buf_len - 4096, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 2 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste1 = statfs.allocated - statfs.stored;
+    }
+    {
+      // the fourth overlapping write should trigger GC
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 3 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste2 = statfs.allocated - statfs.stored;
+      ASSERT_GE(waste1, waste2);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.remove(cid, hoid);
+      cerr << "Cleaning" << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+  }
+  {
+    {
+      bool exists = store->exists(cid, hoid);
+      ASSERT_TRUE(!exists);
+
+      ObjectStore::Transaction t;
+      t.touch(cid, hoid);
+      cerr << "Creating object " << hoid << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+
+      exists = store->exists(cid, hoid);
+      ASSERT_EQ(true, exists);
+    }
+    bufferlist bl;
+
+    for(size_t i = 0; i < data.size(); i++)
+      data[i] = i % 256;
+    bl.append(data);
+
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 3 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 2 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, buf_len - 4096, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste1 = statfs.allocated - statfs.stored;
+    }
+    {
+      // overwrite in the middle of the stacked extents
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 50 * 1024, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste2 = statfs.allocated - statfs.stored;
+      ASSERT_GE(waste1, waste2);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.remove(cid, hoid);
+      cerr << "Cleaning" << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+  }
+  {
+    {
+      bool exists = store->exists(cid, hoid);
+      ASSERT_TRUE(!exists);
+
+      ObjectStore::Transaction t;
+      t.touch(cid, hoid);
+      cerr << "Creating object " << hoid << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+
+      exists = store->exists(cid, hoid);
+      ASSERT_EQ(true, exists);
+    }
+    bufferlist bl;
+    for(size_t i = 0; i < data.size(); i++)
+      data[i] = i % 256;
+    bl.append(data);
+
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 5 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 4 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 3 * (buf_len - 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, buf_len - 4096, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 40 * 1024, bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste1 = statfs.allocated - statfs.stored;
+    }
+    {
+      ObjectStore::Transaction t;
+      t.write(cid, hoid, 5 * (buf_len - 3 * 4096), bl.length(), bl);
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+    {
+      struct store_statfs_t statfs;
+      int r = store->statfs(&statfs);
+      ASSERT_EQ(r, 0);
+      waste2 = statfs.allocated - statfs.stored;
+      ASSERT_GE(waste1, waste2);
+    }
+    {
+      ObjectStore::Transaction t;
+      t.remove(cid, hoid);
+      t.remove_collection(cid);
+      cerr << "Cleaning" << std::endl;
+      r = apply_transaction(store, &osr, std::move(t));
+      ASSERT_EQ(r, 0);
+    }
+  }
+  g_conf->set_val("bluestore_compression", "none");
+  g_ceph_context->_conf->apply_changes(NULL);
+}
+
TEST_P(StoreTest, SimpleObjectTest) {
ObjectStore::Sequencer osr("test");
int r;
ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(100));
- em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, br));
auto a = em.find(100);
ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
ASSERT_EQ(a, em.find_lextent(199));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(200));
- em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, br));
auto b = em.find(200);
ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
ASSERT_EQ(b, em.find_lextent(299));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(300));
- em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, br));
auto d = em.find(400);
ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));
- em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, br));
auto a = em.find(100);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_EQ(a, em.seek_lextent(199));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));
- em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, br));
auto b = em.find(200);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_EQ(b, em.seek_lextent(299));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));
- em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
+ em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, br));
auto d = em.find(400);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_FALSE(em.has_any_lextents(0, 1000));
ASSERT_FALSE(em.has_any_lextents(1000, 1000));
- em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
+ em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b));
ASSERT_FALSE(em.has_any_lextents(0, 50));
ASSERT_FALSE(em.has_any_lextents(0, 100));
ASSERT_FALSE(em.has_any_lextents(50, 50));
ASSERT_TRUE(em.has_any_lextents(199, 2));
ASSERT_FALSE(em.has_any_lextents(200, 2));
- em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
+ em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, b));
ASSERT_TRUE(em.has_any_lextents(199, 1));
ASSERT_TRUE(em.has_any_lextents(199, 2));
ASSERT_TRUE(em.has_any_lextents(200, 2));
ASSERT_TRUE(em.has_any_lextents(299, 1));
ASSERT_FALSE(em.has_any_lextents(300, 1));
- em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
+ em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, b));
ASSERT_TRUE(em.has_any_lextents(0, 10000));
ASSERT_TRUE(em.has_any_lextents(199, 1));
ASSERT_FALSE(em.has_any_lextents(300, 1));
BlueStore::BlobRef b2(new BlueStore::Blob);
BlueStore::BlobRef b3(new BlueStore::Blob);
- em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
- em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
+ em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, 1, b1));
+ em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 10000));
ASSERT_EQ(2u, em.extent_map.size());
- em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
+ em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, 1, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 0));
ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
ASSERT_EQ(2, em.compress_extent_map(0, 100000));
ASSERT_EQ(2u, em.extent_map.size());
em.extent_map.erase(em.find(100));
- em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
- em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
+ em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, 1, b3));
+ em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, 1, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 1));
ASSERT_EQ(0, em.compress_extent_map(0, 100000));
ASSERT_EQ(4u, em.extent_map.size());
- em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
- em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
+ em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, 1, b1));
+ em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, 1, b3));
ASSERT_EQ(0, em.compress_extent_map(0, 99));
ASSERT_EQ(0, em.compress_extent_map(800, 1000));
ASSERT_EQ(2, em.compress_extent_map(100, 500));
em.extent_map.erase(em.find(300));
em.extent_map.erase(em.find(500));
em.extent_map.erase(em.find(700));
- em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
- em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
+ em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, 1, b2));
+ em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, 1, b2));
ASSERT_EQ(1, em.compress_extent_map(0, 1000));
ASSERT_EQ(6u, em.extent_map.size());
}