*/
virtual int64_t allocate(uint64_t want_size, uint64_t alloc_unit,
uint64_t max_alloc_size, int64_t hint,
- AllocExtentVector *extents) = 0;
+ PExtentVector *extents) = 0;
int64_t allocate(uint64_t want_size, uint64_t alloc_unit,
- int64_t hint, AllocExtentVector *extents) {
+ int64_t hint, PExtentVector *extents) {
return allocate(want_size, alloc_unit, want_size, hint, extents);
}
int64_t BitMapAllocator::allocate(
uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
- int64_t hint, mempool::bluestore_alloc::vector<AllocExtent> *extents)
+ int64_t hint, PExtentVector *extents)
{
assert(!(alloc_unit % m_block_size));
int64_t BitMapAllocator::allocate_dis(
uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
- int64_t hint, mempool::bluestore_alloc::vector<AllocExtent> *extents)
+ int64_t hint, PExtentVector *extents)
{
ExtentList block_list = ExtentList(extents, m_block_size, max_alloc_size);
int64_t nblks = (want_size + m_block_size - 1) / m_block_size;
int64_t allocate_dis(
uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
- int64_t hint, mempool::bluestore_alloc::vector<AllocExtent> *extents);
+ int64_t hint, PExtentVector *extents);
public:
BitMapAllocator(CephContext* cct, int64_t device_size, int64_t block_size);
int64_t allocate(
uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
- int64_t hint, mempool::bluestore_alloc::vector<AllocExtent> *extents) override;
+ int64_t hint, PExtentVector *extents) override;
void release(
const interval_set<uint64_t>& release_set) override;
}
int BlueFS::reclaim_blocks(unsigned id, uint64_t want,
- AllocExtentVector *extents)
+ PExtentVector *extents)
{
std::unique_lock<std::mutex> l(lock);
dout(1) << __func__ << " bdev " << id
uint64_t left = round_up_to(len, min_alloc_size);
int r = -ENOSPC;
int64_t alloc_len = 0;
- AllocExtentVector extents;
+ PExtentVector extents;
if (alloc[id]) {
r = alloc[id]->reserve(left);
/// reclaim block space
int reclaim_blocks(unsigned bdev, uint64_t want,
- AllocExtentVector *extents);
+ PExtentVector *extents);
void flush(FileWriter *h) {
std::lock_guard<std::mutex> l(lock);
int r = alloc->reserve(gift);
assert(r == 0);
- AllocExtentVector exts;
int64_t alloc_len = alloc->allocate(gift, cct->_conf->bluefs_alloc_size,
- 0, 0, &exts);
+ 0, 0, extents);
if (alloc_len <= 0) {
dout(1) << __func__ << " no allocate on 0x" << std::hex << gift
alloc->unreserve(gift - alloc_len);
alloc->dump();
}
- for (auto& p : exts) {
- bluestore_pextent_t e = bluestore_pextent_t(p);
+ for (auto& e : *extents) {
dout(1) << __func__ << " gifting " << e << " to bluefs" << dendl;
- extents->push_back(e);
}
ret = 1;
}
while (reclaim > 0) {
// NOTE: this will block and do IO.
- AllocExtentVector extents;
+ PExtentVector extents;
int r = bluefs->reclaim_blocks(bluefs_shared_bdev, reclaim,
&extents);
if (r < 0) {
<< dendl;
return r;
}
- AllocExtentVector prealloc;
+ PExtentVector prealloc;
prealloc.reserve(2 * wctx->writes.size());;
int prealloc_left = 0;
prealloc_left = alloc->allocate(
}
}
- AllocExtentVector extents;
+ PExtentVector extents;
int64_t left = final_length;
while (left > 0) {
assert(prealloc_left > 0);
uint64_t min_alloc_size);
/// return a collection of extents to perform GC on
- const vector<AllocExtent>& get_extents_to_collect() const {
+ const vector<bluestore_pextent_t>& get_extents_to_collect() const {
return extents_to_collect;
}
GarbageCollector(CephContext* _cct) : cct(_cct) {}
///< copies that are affected by the
///< specific write
- vector<AllocExtent> extents_to_collect; ///< protrusive extents that should
- ///< be collected if GC takes place
+ ///< protrusive extents that should be collected if GC takes place
+ vector<bluestore_pextent_t> extents_to_collect;
boost::optional<uint64_t > used_alloc_unit; ///< last processed allocation
///< unit when traversing
uint64_t alloc_unit,
uint64_t max_alloc_size,
int64_t hint,
- mempool::bluestore_alloc::vector<AllocExtent> *extents)
+ PExtentVector *extents)
{
uint64_t allocated_size = 0;
uint64_t offset = 0;
int64_t allocate(
uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
- int64_t hint, mempool::bluestore_alloc::vector<AllocExtent> *extents) override;
+ int64_t hint, PExtentVector *extents) override;
int64_t allocate_int(
uint64_t want_size, uint64_t alloc_unit, int64_t hint,
#include "include/encoding.h"
#include "include/denc.h"
-class bluefs_extent_t : public AllocExtent{
+class bluefs_extent_t {
public:
uint8_t bdev;
+ uint64_t offset = 0;
+ uint32_t length = 0;
bluefs_extent_t(uint8_t b = 0, uint64_t o = 0, uint32_t l = 0)
- : AllocExtent(o, l), bdev(b) {}
+ : bdev(b), offset(o), length(l) {}
+ uint64_t end() const { return offset + length; }
DENC(bluefs_extent_t, v, p) {
DENC_START(1, 1, p);
denc_lba(v.offset, p);
#include "include/stringify.h"
void ExtentList::add_extents(int64_t start, int64_t count) {
- AllocExtent *last_extent = NULL;
+ bluestore_pextent_t *last_extent = NULL;
bool can_merge = false;
if (!m_extents->empty()) {
if (can_merge) {
last_extent->length += (count * m_block_size);
} else {
- m_extents->emplace_back(AllocExtent(start * m_block_size,
+ m_extents->emplace_back(bluestore_pextent_t(start * m_block_size,
count * m_block_size));
}
}
return 0;
}
-void bluestore_blob_t::allocated(uint32_t b_off, uint32_t length, const AllocExtentVector& allocs)
+void bluestore_blob_t::allocated(uint32_t b_off, uint32_t length, const PExtentVector& allocs)
{
if (extents.size() == 0) {
// if blob is compressed then logical length to be already configured
if (b_off) {
extents.emplace_back(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, b_off));
}
uint32_t new_len = b_off;
for (auto& a : allocs) {
void flush() {
if (invalid) {
v.emplace_back(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
- invalid));
+ invalid));
invalid = 0;
}
}
}
else {
flush();
- v.emplace_back(bluestore_pextent_t(offset, length));
+ v.emplace_back(offset, length);
}
}
};
};
WRITE_CLASS_DENC(bluestore_cnode_t)
-class AllocExtent;
-typedef mempool::bluestore_alloc::vector<AllocExtent> AllocExtentVector;
-class AllocExtent {
-public:
- uint64_t offset;
- uint32_t length;
-
- AllocExtent() {
- offset = 0;
- length = 0;
- }
-
- AllocExtent(int64_t off, int32_t len) : offset(off), length(len) { }
- uint64_t end() const {
- return offset + length;
- }
- bool operator==(const AllocExtent& other) const {
- return offset == other.offset && length == other.length;
- }
-};
-
-inline static ostream& operator<<(ostream& out, const AllocExtent& e) {
- return out << "0x" << std::hex << e.offset << "~" << e.length << std::dec;
-}
-
-class ExtentList {
- AllocExtentVector *m_extents;
- int64_t m_block_size;
- int64_t m_max_blocks;
-
-public:
- void init(AllocExtentVector *extents, int64_t block_size,
- uint64_t max_alloc_size) {
- m_extents = extents;
- m_block_size = block_size;
- m_max_blocks = max_alloc_size / block_size;
- assert(m_extents->empty());
- }
-
- ExtentList(AllocExtentVector *extents, int64_t block_size) {
- init(extents, block_size, 0);
- }
-
- ExtentList(AllocExtentVector *extents, int64_t block_size,
- uint64_t max_alloc_size) {
- init(extents, block_size, max_alloc_size);
- }
+/// pextent: physical extent
+struct bluestore_pextent_t {
+ static const uint64_t INVALID_OFFSET = ~0ull;
- void reset() {
- m_extents->clear();
- }
+ uint64_t offset = 0;
+ uint32_t length = 0;
- void add_extents(int64_t start, int64_t count);
+ bluestore_pextent_t() {}
+ bluestore_pextent_t(uint64_t o, uint64_t l) : offset(o), length(l) {}
+ bluestore_pextent_t(const bluestore_pextent_t &ext) :
+ offset(ext.offset), length(ext.length) {}
- AllocExtentVector *get_extents() {
- return m_extents;
+ bool is_valid() const {
+ return offset != INVALID_OFFSET;
}
-
- std::pair<int64_t, int64_t> get_nth_extent(int index) {
- return std::make_pair
- ((*m_extents)[index].offset / m_block_size,
- (*m_extents)[index].length / m_block_size);
+ uint64_t end() const {
+ return offset != INVALID_OFFSET ? offset + length : INVALID_OFFSET;
}
- int64_t get_extent_count() {
- return m_extents->size();
- }
-};
-
-
-/// pextent: physical extent
-struct bluestore_pextent_t : public AllocExtent {
- const static uint64_t INVALID_OFFSET = ~0ull;
-
- bluestore_pextent_t() : AllocExtent() {}
- bluestore_pextent_t(uint64_t o, uint64_t l) : AllocExtent(o, l) {}
- bluestore_pextent_t(const AllocExtent &ext) :
- AllocExtent(ext.offset, ext.length) { }
-
- bluestore_pextent_t& operator=(const AllocExtent &ext) {
- offset = ext.offset;
- length = ext.length;
- return *this;
- }
- bool is_valid() const {
- return offset != INVALID_OFFSET;
+ bool operator==(const bluestore_pextent_t& other) const {
+ return offset == other.offset && length == other.length;
}
DENC(bluestore_pextent_t, v, p) {
};
+class ExtentList {
+ PExtentVector *m_extents;
+ int64_t m_block_size;
+ int64_t m_max_blocks;
+
+public:
+ void init(PExtentVector *extents, int64_t block_size,
+ uint64_t max_alloc_size) {
+ m_extents = extents;
+ m_block_size = block_size;
+ m_max_blocks = max_alloc_size / block_size;
+ assert(m_extents->empty());
+ }
+
+ ExtentList(PExtentVector *extents, int64_t block_size) {
+ init(extents, block_size, 0);
+ }
+
+ ExtentList(PExtentVector *extents, int64_t block_size,
+ uint64_t max_alloc_size) {
+ init(extents, block_size, max_alloc_size);
+ }
+
+ void reset() {
+ m_extents->clear();
+ }
+
+ void add_extents(int64_t start, int64_t count);
+
+ PExtentVector *get_extents() {
+ return m_extents;
+ }
+
+ std::pair<int64_t, int64_t> get_nth_extent(int index) {
+ return std::make_pair
+ ((*m_extents)[index].offset / m_block_size,
+ (*m_extents)[index].length / m_block_size);
+ }
+
+ int64_t get_extent_count() {
+ return m_extents->size();
+ }
+};
+
/// extent_map: a map of reference counted extents
struct bluestore_extent_ref_map_t {
struct record_t {
}
void split(uint32_t blob_offset, bluestore_blob_t& rb);
- void allocated(uint32_t b_off, uint32_t length, const AllocExtentVector& allocs);
+ void allocated(uint32_t b_off, uint32_t length, const PExtentVector& allocs);
void allocated_test(const bluestore_pextent_t& alloc); // intended for UT only
/// updates blob's pextents container and return unused pextents eligible
init_alloc(blocks, block_size);
alloc->init_add_free(block_size, block_size);
EXPECT_EQ(alloc->reserve(block_size), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(block_size, alloc->allocate(block_size, block_size,
0, (int64_t) 0, &extents));
}
{
alloc->init_add_free(0, block_size * 4);
EXPECT_EQ(alloc->reserve(block_size * 4), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
0, (int64_t) 0, &extents));
alloc->init_add_free(0, block_size * 2);
alloc->init_add_free(3 * block_size, block_size * 2);
EXPECT_EQ(alloc->reserve(block_size * 4), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
{
alloc->init_add_free(0, block_size * 4);
EXPECT_EQ(alloc->reserve(block_size * 4), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
block_size, (int64_t) 0, &extents));
{
alloc->init_add_free(0, block_size * 4);
EXPECT_EQ(alloc->reserve(block_size * 4), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
2 * block_size, (int64_t) 0, &extents));
{
alloc->init_add_free(0, block_size * 1024);
EXPECT_EQ(alloc->reserve(block_size * 1024), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(1024 * block_size,
alloc->allocate(1024 * (uint64_t)block_size,
(uint64_t) block_size * 4,
{
alloc->init_add_free(0, block_size * 16);
EXPECT_EQ(alloc->reserve(block_size * 16), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(16 * block_size,
alloc->allocate(16 * (uint64_t)block_size, (uint64_t) block_size,
2 * block_size, (int64_t) 0, &extents));
alloc->init_add_free(block_size * 512, block_size * 256);
EXPECT_EQ(alloc->reserve(block_size * 512), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(512 * block_size,
alloc->allocate(512 * (uint64_t)block_size,
(uint64_t) block_size * 256,
for (int64_t big = mas; big < 1048576*128; big*=2) {
cout << big << std::endl;
EXPECT_EQ(alloc->reserve(big), 0);
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(big,
alloc->allocate(big, mas, 0, &extents));
}
init_alloc(blocks, 1);
alloc->init_add_free(0, blocks);
- AllocExtentVector extents;
+ PExtentVector extents;
alloc->reserve(blocks);
allocated = alloc->allocate(1, 1, 1, zone_size, &extents);
alloc->init_add_free(3670016, 2097152);
EXPECT_EQ(0, alloc->reserve(want_size));
- AllocExtentVector extents;
+ PExtentVector extents;
EXPECT_EQ(want_size, alloc->allocate(want_size, alloc_unit, 0, &extents));
}
bmap_test_assert(lock);
int64_t blk_size = 1024;
- AllocExtentVector extents;
+ PExtentVector extents;
std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size);
allocated = zone->alloc_blocks_dis(zone->size() / 2, 1, 0, 0, block_list.get());
bmap_test_assert(allocated == zone->size() / 2);
{
int64_t blk_size = 1024;
- AllocExtentVector extents;
+ PExtentVector extents;
std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size);
zone = std::make_unique<BitMapZone>(g_ceph_context, total_blocks, 0);
*/
{
int64_t blk_size = 1;
- AllocExtentVector extents;
+ PExtentVector extents;
for (int i = 1; i <= total_blocks - BmapEntry::size(); i = i << 1) {
for (int64_t j = 0; j <= BmapEntry::size(); j = 1 << j) {
for (int64_t iter = 0; iter < max_iter; iter++) {
for (int64_t j = 0; alloc_size <= total_blocks; j++) {
int64_t blk_size = 1024;
- AllocExtentVector extents;
+ PExtentVector extents;
std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size, alloc_size);
for (int64_t i = 0; i < total_blocks; i += alloc_size) {
bmap_test_assert(alloc->reserve_blocks(alloc_size) == true);
}
int64_t blk_size = 1024;
- AllocExtentVector extents;
+ PExtentVector extents;
ExtentList *block_list = new ExtentList(&extents, blk_size);
int64_t allocated = 0;
int64_t verified = 0;
int64_t count = 0;
- AllocExtentVector extents;
+ PExtentVector extents;
std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size, max_alloc);
int64_t alloced = 0;
int64_t num_blocks = alloc->size() / NUM_THREADS;
- AllocExtentVector extents;
+ PExtentVector extents;
std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, 4096);
while (num_iters--) {
ASSERT_EQ(saving, 1);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.size(), 1u);
- ASSERT_EQ(to_collect[0], AllocExtent(100,10) );
+ ASSERT_EQ(to_collect[0], bluestore_pextent_t(100,10) );
em.clear();
old_extents.clear();
ASSERT_EQ(saving, 2);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.size(), 2u);
- ASSERT_TRUE(to_collect[0] == AllocExtent(0x0,0x8000) ||
- to_collect[1] == AllocExtent(0x0,0x8000));
- ASSERT_TRUE(to_collect[0] == AllocExtent(0x3f000,0x1000) ||
- to_collect[1] == AllocExtent(0x3f000,0x1000));
+ ASSERT_TRUE(to_collect[0] == bluestore_pextent_t(0x0,0x8000) ||
+ to_collect[1] == bluestore_pextent_t(0x0,0x8000));
+ ASSERT_TRUE(to_collect[0] == bluestore_pextent_t(0x3f000,0x1000) ||
+ to_collect[1] == bluestore_pextent_t(0x3f000,0x1000));
em.clear();
old_extents.clear();
ASSERT_EQ(saving, 2);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.size(), 2u);
- ASSERT_TRUE(to_collect[0] == AllocExtent(0x0,0x8000) ||
- to_collect[1] == AllocExtent(0x0,0x8000));
- ASSERT_TRUE(to_collect[0] == AllocExtent(0x3f000,0x1000) ||
- to_collect[1] == AllocExtent(0x3f000,0x1000));
+ ASSERT_TRUE(to_collect[0] == bluestore_pextent_t(0x0,0x8000) ||
+ to_collect[1] == bluestore_pextent_t(0x0,0x8000));
+ ASSERT_TRUE(to_collect[0] == bluestore_pextent_t(0x3f000,0x1000) ||
+ to_collect[1] == bluestore_pextent_t(0x3f000,0x1000));
em.clear();
old_extents.clear();