struct backref_map_val_t {
extent_len_t len = 0; ///< length of extents
- laddr_t laddr = 0; ///< logical address of extents
+ laddr_t laddr = L_ADDR_MIN; ///< logical address of extents
extent_types_t type = extent_types_t::ROOT;
backref_map_val_t() = default;
struct backref_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
- laddr_le_t laddr = laddr_le_t(0);
+ laddr_le_t laddr = laddr_le_t(L_ADDR_MIN);
extent_types_le_t type = 0;
backref_map_val_le_t() = default;
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include "crimson/os/seastore/seastore_types.h"
+
+namespace crimson::os::seastore {
+namespace details {
+
+// this interval_set structure is copied from include/interval_set.h to allow
+// using a different type for the length now that laddr_t becomes a struct,
+// and to avoid changing the behavior of other components.
+//
+// The latest commit is 58860ce3f60489d258aaa10fd783e68083261937
+
+template<typename T, typename L, template<typename, typename, typename ...> class C = std::map>
+class interval_set {
+ public:
+ using Map = C<T, L>;
+ using value_type = typename Map::value_type;
+ using offset_type = T;
+ using length_type = L;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using size_type = typename Map::size_type;
+
+ class const_iterator;
+
+ class iterator
+ {
+ public:
+ using difference_type = ssize_t;
+ using value_type = typename Map::value_type;
+ using pointer = typename Map::value_type*;
+ using reference = typename Map::value_type&;
+ using iterator_category = std::forward_iterator_tag;
+
+ explicit iterator(typename Map::iterator iter)
+ : _iter(iter)
+ { }
+
+ // For the copy constructor and assignment operator, the compiler-generated functions, which
+ // perform simple bitwise copying, should be fine.
+
+ bool operator==(const iterator& rhs) const {
+ return (_iter == rhs._iter);
+ }
+
+ bool operator!=(const iterator& rhs) const {
+ return (_iter != rhs._iter);
+ }
+
+ // Dereference this iterator to get a pair.
+ reference operator*() const {
+ return *_iter;
+ }
+
+ // Return the interval start.
+ offset_type get_start() const {
+ return _iter->first;
+ }
+
+ // Return the interval length.
+ length_type get_len() const {
+ return _iter->second;
+ }
+
+ offset_type get_end() const {
+ return _iter->first + _iter->second;
+ }
+
+ // Set the interval length.
+ void set_len(const length_type& len) {
+ _iter->second = len;
+ }
+
+ // Preincrement
+ iterator& operator++()
+ {
+ ++_iter;
+ return *this;
+ }
+
+ // Postincrement
+ iterator operator++(int)
+ {
+ iterator prev(_iter);
+ ++_iter;
+ return prev;
+ }
+
+ // Predecrement
+ iterator& operator--()
+ {
+ --_iter;
+ return *this;
+ }
+
+ // Postdecrement
+ iterator operator--(int)
+ {
+ iterator prev(_iter);
+ --_iter;
+ return prev;
+ }
+
+ friend class interval_set::const_iterator;
+
+ protected:
+ typename Map::iterator _iter;
+ friend class interval_set;
+ };
+
+ class const_iterator
+ {
+ public:
+ using difference_type = ssize_t;
+ using value_type = const typename Map::value_type;
+ using pointer = const typename Map::value_type*;
+ using reference = const typename Map::value_type&;
+ using iterator_category = std::forward_iterator_tag;
+
+ explicit const_iterator(typename Map::const_iterator iter)
+ : _iter(iter)
+ { }
+
+ const_iterator(const iterator &i)
+ : _iter(i._iter)
+ { }
+
+ // For the copy constructor and assignment operator, the compiler-generated functions, which
+ // perform simple bitwise copying, should be fine.
+
+ bool operator==(const const_iterator& rhs) const {
+ return (_iter == rhs._iter);
+ }
+
+ bool operator!=(const const_iterator& rhs) const {
+ return (_iter != rhs._iter);
+ }
+
+ // Dereference this iterator to get a pair.
+ reference operator*() const {
+ return *_iter;
+ }
+
+ // Return the interval start.
+ offset_type get_start() const {
+ return _iter->first;
+ }
+ offset_type get_end() const {
+ return _iter->first + _iter->second;
+ }
+
+ // Return the interval length.
+ length_type get_len() const {
+ return _iter->second;
+ }
+
+ // Preincrement
+ const_iterator& operator++()
+ {
+ ++_iter;
+ return *this;
+ }
+
+ // Postincrement
+ const_iterator operator++(int)
+ {
+ const_iterator prev(_iter);
+ ++_iter;
+ return prev;
+ }
+
+ // Predecrement
+    const_iterator& operator--()
+    {
+      --_iter;
+      return *this;
+    }
+
+    // Postdecrement
+    const_iterator operator--(int)
+    {
+      const_iterator prev(_iter);
+      --_iter;
+      return prev;
+    }
+
+ protected:
+ typename Map::const_iterator _iter;
+ };
+
+ interval_set() = default;
+ interval_set(Map&& other) {
+ m.swap(other);
+ for (const auto& p : m) {
+ _size += p.second;
+ }
+ }
+
+ size_type num_intervals() const
+ {
+ return m.size();
+ }
+
+ iterator begin() {
+ return iterator(m.begin());
+ }
+
+ iterator lower_bound(T start) {
+ return iterator(find_inc_m(start));
+ }
+
+ iterator end() {
+ return iterator(m.end());
+ }
+
+ const_iterator begin() const {
+ return const_iterator(m.begin());
+ }
+
+ const_iterator lower_bound(T start) const {
+ return const_iterator(find_inc(start));
+ }
+
+ const_iterator end() const {
+ return const_iterator(m.end());
+ }
+
+ // helpers
+ private:
+ auto find_inc(T start) const {
+ auto p = m.lower_bound(start); // p->first >= start
+ if (p != m.begin() &&
+ (p == m.end() || p->first > start)) {
+ --p; // might overlap?
+ if (p->first + p->second <= start)
+ ++p; // it doesn't.
+ }
+ return p;
+ }
+
+ auto find_inc_m(T start) {
+ auto p = m.lower_bound(start);
+ if (p != m.begin() &&
+ (p == m.end() || p->first > start)) {
+ --p; // might overlap?
+ if (p->first + p->second <= start)
+ ++p; // it doesn't.
+ }
+ return p;
+ }
+
+ auto find_adj(T start) const {
+ auto p = m.lower_bound(start);
+ if (p != m.begin() &&
+ (p == m.end() || p->first > start)) {
+ --p; // might touch?
+ if (p->first + p->second < start)
+ ++p; // it doesn't.
+ }
+ return p;
+ }
+
+ auto find_adj_m(T start) {
+ auto p = m.lower_bound(start);
+ if (p != m.begin() &&
+ (p == m.end() || p->first > start)) {
+ --p; // might touch?
+ if (p->first + p->second < start)
+ ++p; // it doesn't.
+ }
+ return p;
+ }
+
+ void intersection_size_asym(const interval_set &s, const interval_set &l) {
+ auto ps = s.m.begin();
+ ceph_assert(ps != s.m.end());
+ auto offset = ps->first;
+ bool first = true;
+ auto mi = m.begin();
+
+ while (1) {
+ if (first)
+ first = false;
+ auto pl = l.find_inc(offset);
+ if (pl == l.m.end())
+ break;
+ while (ps != s.m.end() && ps->first + ps->second <= pl->first)
+ ++ps;
+ if (ps == s.m.end())
+ break;
+ offset = pl->first + pl->second;
+ if (offset <= ps->first) {
+ offset = ps->first;
+ continue;
+ }
+
+ if (*ps == *pl) {
+ do {
+ mi = m.insert(mi, *ps);
+ _size += ps->second;
+ ++ps;
+ ++pl;
+ } while (ps != s.m.end() && pl != l.m.end() && *ps == *pl);
+ if (ps == s.m.end())
+ break;
+ offset = ps->first;
+ continue;
+ }
+
+ auto start = std::max<T>(ps->first, pl->first);
+ auto en = std::min<T>(ps->first + ps->second, offset);
+ ceph_assert(en > start);
+ mi = m.emplace_hint(mi, start, en - start);
+ _size += mi->second;
+ if (ps->first + ps->second <= offset) {
+ ++ps;
+ if (ps == s.m.end())
+ break;
+ offset = ps->first;
+ }
+ }
+ }
+
+ bool subset_size_sym(const interval_set &b) const {
+ auto pa = m.begin(), pb = b.m.begin();
+ const auto a_end = m.end(), b_end = b.m.end();
+
+ while (pa != a_end && pb != b_end) {
+ while (pb->first + pb->second <= pa->first) {
+ ++pb;
+ if (pb == b_end)
+ return false;
+ }
+
+ if (*pa == *pb) {
+ do {
+ ++pa;
+ ++pb;
+ } while (pa != a_end && pb != b_end && *pa == *pb);
+ continue;
+ }
+
+ // interval begins before other
+ if (pa->first < pb->first)
+ return false;
+ // interval is longer than other
+ if (pa->first + pa->second > pb->first + pb->second)
+ return false;
+
+ ++pa;
+ }
+
+ return pa == a_end;
+ }
+
+ public:
+ bool operator==(const interval_set& other) const {
+ return _size == other._size && m == other.m;
+ }
+
+ uint64_t size() const {
+ return _size;
+ }
+
+ void bound_encode(size_t& p) const {
+ denc_traits<Map>::bound_encode(m, p);
+ }
+ void encode(ceph::buffer::list::contiguous_appender& p) const {
+ denc(m, p);
+ }
+ void decode(ceph::buffer::ptr::const_iterator& p) {
+ denc(m, p);
+ _size = 0;
+    for (const auto& i : m) {
+      _size += i.second;
+    }
+ }
+ void decode(ceph::buffer::list::iterator& p) {
+ denc(m, p);
+ _size = 0;
+    for (const auto& i : m) {
+      _size += i.second;
+    }
+ }
+
+ void encode_nohead(ceph::buffer::list::contiguous_appender& p) const {
+ denc_traits<Map>::encode_nohead(m, p);
+ }
+ void decode_nohead(int n, ceph::buffer::ptr::const_iterator& p) {
+ denc_traits<Map>::decode_nohead(n, m, p);
+ _size = 0;
+    for (const auto& i : m) {
+      _size += i.second;
+    }
+ }
+
+ void clear() {
+ m.clear();
+ _size = 0;
+ }
+
+ bool contains(T i, T *pstart=0, L *plen=0) const {
+ auto p = find_inc(i);
+ if (p == m.end()) return false;
+ if (p->first > i) return false;
+ if (p->first+p->second <= i) return false;
+ ceph_assert(p->first <= i && p->first+p->second > i);
+ if (pstart)
+ *pstart = p->first;
+ if (plen)
+ *plen = p->second;
+ return true;
+ }
+ bool contains(T start, L len) const {
+ auto p = find_inc(start);
+ if (p == m.end()) return false;
+ if (p->first > start) return false;
+ if (p->first+p->second <= start) return false;
+ ceph_assert(p->first <= start && p->first+p->second > start);
+ if (p->first+p->second < start+len) return false;
+ return true;
+ }
+ bool intersects(T start, L len) const {
+ interval_set a;
+ a.insert(start, len);
+ interval_set i;
+ i.intersection_of( *this, a );
+ if (i.empty()) return false;
+ return true;
+ }
+
+ // outer range of set
+ bool empty() const {
+ return m.empty();
+ }
+ offset_type range_start() const {
+ ceph_assert(!empty());
+ auto p = m.begin();
+ return p->first;
+ }
+ offset_type range_end() const {
+ ceph_assert(!empty());
+ auto p = m.rbegin();
+ return p->first + p->second;
+ }
+
+  // interval start after i (where i is not in the set)
+ bool starts_after(T i) const {
+ ceph_assert(!contains(i));
+ auto p = find_inc(i);
+ if (p == m.end()) return false;
+ return true;
+ }
+ offset_type start_after(T i) const {
+ ceph_assert(!contains(i));
+ auto p = find_inc(i);
+ return p->first;
+ }
+
+ // interval end that contains start
+ offset_type end_after(T start) const {
+ ceph_assert(contains(start));
+ auto p = find_inc(start);
+ return p->first+p->second;
+ }
+
+ void insert(T val) {
+ insert(val, 1);
+ }
+
+ void insert(T start, L len, T *pstart=0, L *plen=0) {
+ //cout << "insert " << start << "~" << len << endl;
+ ceph_assert(len > 0);
+ _size += len;
+ auto p = find_adj_m(start);
+ if (p == m.end()) {
+ m[start] = len; // new interval
+ if (pstart)
+ *pstart = start;
+ if (plen)
+ *plen = len;
+ } else {
+ if (p->first < start) {
+
+ if (p->first + p->second != start) {
+ //cout << "p is " << p->first << "~" << p->second << ", start is " << start << ", len is " << len << endl;
+ ceph_abort();
+ }
+
+ p->second += len; // append to end
+
+ auto n = p;
+ ++n;
+ if (pstart)
+ *pstart = p->first;
+ if (n != m.end() &&
+ start+len == n->first) { // combine with next, too!
+ p->second += n->second;
+ if (plen)
+ *plen = p->second;
+ m.erase(n);
+ } else {
+ if (plen)
+ *plen = p->second;
+ }
+ } else {
+ if (start+len == p->first) {
+ if (pstart)
+ *pstart = start;
+ if (plen)
+ *plen = len + p->second;
+ L psecond = p->second;
+ m.erase(p);
+ m[start] = len + psecond; // append to front
+ } else {
+ ceph_assert(p->first > start+len);
+ if (pstart)
+ *pstart = start;
+ if (plen)
+ *plen = len;
+ m[start] = len; // new interval
+ }
+ }
+ }
+ }
+
+ void swap(interval_set& other) {
+ m.swap(other.m);
+ std::swap(_size, other._size);
+ }
+
+ void erase(const iterator &i) {
+ _size -= i.get_len();
+ m.erase(i._iter);
+ }
+
+ void erase(T val) {
+ erase(val, 1);
+ }
+
+ void erase(T start, L len,
+ std::function<bool(T, L)> claim = {}) {
+ auto p = find_inc_m(start);
+
+ _size -= len;
+
+ ceph_assert(p != m.end());
+ ceph_assert(p->first <= start);
+
+ L before = start - p->first;
+ ceph_assert(p->second >= before+len);
+ L after = p->second - before - len;
+ if (before) {
+ if (claim && claim(p->first, before)) {
+ _size -= before;
+ m.erase(p);
+ } else {
+ p->second = before; // shorten bit before
+ }
+ } else {
+ m.erase(p);
+ }
+ if (after) {
+ if (claim && claim(start + len, after)) {
+ _size -= after;
+ } else {
+ m[start + len] = after;
+ }
+ }
+ }
+
+ void subtract(const interval_set &a) {
+ for (const auto& [start, len] : a.m) {
+ erase(start, len);
+ }
+ }
+
+ void insert(const interval_set &a) {
+ for (const auto& [start, len] : a.m) {
+ insert(start, len);
+ }
+ }
+
+
+ void intersection_of(const interval_set &a, const interval_set &b) {
+ ceph_assert(&a != this);
+ ceph_assert(&b != this);
+ clear();
+
+ const interval_set *s, *l;
+
+ if (a.size() < b.size()) {
+ s = &a;
+ l = &b;
+ } else {
+ s = &b;
+ l = &a;
+ }
+
+ if (!s->size())
+ return;
+
+ /*
+ * Use the lower_bound algorithm for larger size ratios
+ * where it performs better, but not for smaller size
+ * ratios where sequential search performs better.
+ */
+ if (l->size() / s->size() >= 10) {
+ intersection_size_asym(*s, *l);
+ return;
+ }
+
+ auto pa = a.m.begin();
+ auto pb = b.m.begin();
+ auto mi = m.begin();
+
+ while (pa != a.m.end() && pb != b.m.end()) {
+ // passing?
+ if (pa->first + pa->second <= pb->first)
+ { pa++; continue; }
+ if (pb->first + pb->second <= pa->first)
+ { pb++; continue; }
+
+ if (*pa == *pb) {
+ do {
+ mi = m.insert(mi, *pa);
+ _size += pa->second;
+ ++pa;
+ ++pb;
+ } while (pa != a.m.end() && pb != b.m.end() && *pa == *pb);
+ continue;
+ }
+
+ T start = std::max(pa->first, pb->first);
+ T en = std::min(pa->first+pa->second, pb->first+pb->second);
+ ceph_assert(en > start);
+ mi = m.emplace_hint(mi, start, en - start);
+ _size += mi->second;
+ if (pa->first+pa->second > pb->first+pb->second)
+ pb++;
+ else
+ pa++;
+ }
+ }
+ void intersection_of(const interval_set& b) {
+ interval_set a;
+ swap(a);
+ intersection_of(a, b);
+ }
+
+ void union_of(const interval_set &a, const interval_set &b) {
+ ceph_assert(&a != this);
+ ceph_assert(&b != this);
+ clear();
+
+ //cout << "union_of" << endl;
+
+ // a
+ m = a.m;
+ _size = a._size;
+
+ // - (a*b)
+ interval_set ab;
+ ab.intersection_of(a, b);
+ subtract(ab);
+
+ // + b
+ insert(b);
+ return;
+ }
+ void union_of(const interval_set &b) {
+ interval_set a;
+ swap(a);
+ union_of(a, b);
+ }
+ void union_insert(T off, L len) {
+ interval_set a;
+ a.insert(off, len);
+ union_of(a);
+ }
+
+ bool subset_of(const interval_set &big) const {
+ if (!size())
+ return true;
+ if (size() > big.size())
+ return false;
+ if (range_end() > big.range_end())
+ return false;
+
+ /*
+ * Use the lower_bound algorithm for larger size ratios
+ * where it performs better, but not for smaller size
+ * ratios where sequential search performs better.
+ */
+ if (big.size() / size() < 10)
+ return subset_size_sym(big);
+
+ for (const auto& [start, len] : m) {
+ if (!big.contains(start, len)) return false;
+ }
+ return true;
+ }
+
+ /*
+ * build a subset of @other, starting at or after @start, and including
+ * @len worth of values, skipping holes. e.g.,
+ * span_of([5~10,20~5], 8, 5) -> [8~2,20~3]
+ */
+ void span_of(const interval_set &other, T start, L len) {
+ clear();
+ auto p = other.find_inc(start);
+ if (p == other.m.end())
+ return;
+ if (p->first < start) {
+ if (p->first + p->second < start)
+ return;
+ if (p->first + p->second < start + len) {
+ L howmuch = p->second - (start - p->first);
+ insert(start, howmuch);
+ len -= howmuch;
+ p++;
+ } else {
+ insert(start, len);
+ return;
+ }
+ }
+ while (p != other.m.end() && len > 0) {
+ if (p->second < len) {
+ insert(p->first, p->second);
+ len -= p->second;
+ p++;
+ } else {
+ insert(p->first, len);
+ return;
+ }
+ }
+ }
+
+ /*
+ * Move contents of m into another Map. Use that instead of
+ * encoding interval_set into bufferlist then decoding it back into Map.
+ */
+ Map detach() && {
+ return std::move(m);
+ }
+
+private:
+ // data
+ uint64_t _size = 0;
+ Map m; // map start -> len
+};
+} // namespace details
+using laddr_interval_set_t = details::interval_set<laddr_t, extent_len_t>;
+} // namespace crimson::os::seastore
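A minimal usage sketch of the new alias (the demo function is hypothetical and the values arbitrary; assumes building inside the ceph tree): intervals that touch are merged on insert, and intersection_of picks the sequential or lower_bound strategy based on the size ratio described in the header's comments.

#include <cassert>
#include "crimson/os/seastore/laddr_interval_set.h"

using namespace crimson::os::seastore;

void laddr_interval_set_demo() {
  laddr_interval_set_t set;
  set.insert(laddr_t(0x1000), 0x2000);    // [0x1000, 0x3000)
  set.insert(laddr_t(0x3000), 0x1000);    // touches, merges to [0x1000, 0x4000)
  assert(set.num_intervals() == 1);
  assert(set.contains(laddr_t(0x2000), 0x800));

  laddr_interval_set_t other;
  other.insert(laddr_t(0x3800), 0x1000);  // overlaps the tail of `set`
  laddr_interval_set_t overlap;
  overlap.intersection_of(set, other);    // -> [0x3800, 0x4000)
  assert(overlap.size() == 0x800);
}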
return iter.next(c).si_then([&state, &alloc_info](auto it) {
state.insert_iter = it;
if (alloc_info.key == L_ADDR_NULL) {
- state.last_end += alloc_info.len;
+ state.last_end = state.last_end + alloc_info.len;
}
});
});
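The compound assignment above is spelled out because the strong laddr_t type introduced later in this series defines operator+ against its underlying integer but no operator+=. A tiny illustration (names are illustrative, assuming only the laddr_t definition shown further down):

laddr_t advance_demo(laddr_t last_end, extent_len_t len) {
  // last_end += len;       // does not compile: laddr_t has no operator+=
  return last_end + len;    // laddr_t + integer yields laddr_t
}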
#include "crimson/common/log.h"
#include "crimson/os/seastore/object_data_handler.h"
+#include "crimson/os/seastore/laddr_interval_set.h"
namespace {
seastar::logger& logger() {
nullptr, new_offset, new_len, p->get_key(), p->get_length(), b);
}
- uint64_t laddr_start;
+ laddr_t laddr_start;
extent_len_t length;
std::optional<bufferlist> bl;
pin(std::move(pin)), new_offset(new_offset), new_len(new_len) {}
extent_to_remap_t(type_t type,
LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len,
- uint64_t ori_laddr, extent_len_t ori_len, std::optional<bufferlist> b)
+ laddr_t ori_laddr, extent_len_t ori_len, std::optional<bufferlist> b)
: type(type),
pin(std::move(pin)), new_offset(new_offset), new_len(new_len),
laddr_start(ori_laddr), length(ori_len), bl(b) {}
}
}
- interval_set<uint64_t> pre_alloc_addr_removed, pre_alloc_addr_remapped;
+ laddr_interval_set_t pre_alloc_addr_removed, pre_alloc_addr_remapped;
if (delta_based_overwrite_max_extent_size) {
for (auto &r : ops.to_remove) {
if (r->is_data_stable() && !r->is_zero_reserved()) {
erased_num = std::erase_if(
ops.to_remove,
[®ion, &to_remap](auto &r) {
- interval_set<uint64_t> range;
+ laddr_interval_set_t range;
range.insert(r->get_key(), r->get_length());
if (range.contains(region.addr, region.len) && !r->is_clone()) {
to_remap.push_back(extent_to_remap_t::create_overwrite(
erased_num = std::erase_if(
ops.to_remap,
[®ion, &to_remap](auto &r) {
- interval_set<uint64_t> range;
+ laddr_interval_set_t range;
range.insert(r.pin->get_key(), r.pin->get_length());
if (range.contains(region.addr, region.len) && !r.pin->is_clone()) {
to_remap.push_back(extent_to_remap_t::create_overwrite(
[ctx](auto ®ion) {
LOG_PREFIX(object_data_handler.cc::do_insertions);
if (region.is_data()) {
- assert_aligned(region.addr);
+ ceph_assert(region.addr.is_aligned(ctx.tm.get_block_size()));
assert_aligned(region.len);
ceph_assert(region.len == region.bl->length());
DEBUGT("allocating extent: {}~{}",
off);
}
iter.copy(extent->get_length(), extent->get_bptr().c_str());
- off += extent->get_length();
+ off = off + extent->get_length();
left -= extent->get_length();
}
return ObjectDataHandler::write_iertr::now();
right_paddr(pins.back()->get_val()),
data_begin(offset),
data_end(offset + len),
- aligned_data_begin(p2align((uint64_t)data_begin, (uint64_t)block_size)),
- aligned_data_end(p2roundup((uint64_t)data_end, (uint64_t)block_size)),
+ aligned_data_begin(data_begin.get_aligned_laddr(block_size)),
+ aligned_data_end(data_end.get_roundup_laddr(block_size)),
left_operation(overwrite_operation_t::UNKNOWN),
right_operation(overwrite_operation_t::UNKNOWN),
block_size(block_size),
private:
// refer to overwrite_plan_t description
void validate() const {
- ceph_assert(pin_begin % block_size == 0);
- ceph_assert(pin_end % block_size == 0);
- ceph_assert(aligned_data_begin % block_size == 0);
- ceph_assert(aligned_data_end % block_size == 0);
+ ceph_assert(pin_begin.is_aligned(block_size));
+ ceph_assert(pin_end.is_aligned(block_size));
+ ceph_assert(aligned_data_begin.is_aligned(block_size));
+ ceph_assert(aligned_data_end.is_aligned(block_size));
ceph_assert(pin_begin <= aligned_data_begin);
ceph_assert(aligned_data_begin <= data_begin);
laddr_t offset, extent_len_t len,
std::optional<bufferptr> &&headptr, std::optional<bufferptr> &&tailptr)
{
- auto zero_left = p2roundup(offset, (laddr_t)block_size);
- auto zero_right = p2align(offset + len, (laddr_t)block_size);
+ auto zero_left = offset.get_roundup_laddr(block_size);
+ auto zero_right = (offset + len).get_aligned_laddr(block_size);
auto left = headptr ? (offset - headptr->length()) : offset;
auto right = tailptr ?
(offset + len + tailptr->length()) :
(!tailptr && (right == zero_right)));
assert(right > left);
- assert((left % block_size) == 0);
- assert((right % block_size) == 0);
+ assert(left.is_aligned(block_size));
+ assert(right.is_aligned(block_size));
// zero region too small for a reserved section,
// headptr and tailptr in same extent
bufferlist write_bl;
if (headptr) {
write_bl.append(*headptr);
- write_offset -= headptr->length();
- assert_aligned(write_offset);
+ write_offset = write_offset - headptr->length();
+ ceph_assert(write_offset.is_aligned(ctx.tm.get_block_size()));
}
write_bl.claim_append(*bl);
if (tailptr) {
struct omap_inner_key_t {
uint16_t key_off = 0;
uint16_t key_len = 0;
- laddr_t laddr = 0;
+ laddr_t laddr = L_ADDR_MIN;
omap_inner_key_t() = default;
omap_inner_key_t(uint16_t off, uint16_t len, laddr_t addr)
struct omap_inner_key_le_t {
ceph_le16 key_off{0};
ceph_le16 key_len{0};
- laddr_le_t laddr{0};
+ laddr_le_t laddr{L_ADDR_MIN};
omap_inner_key_le_t() = default;
omap_inner_key_le_t(const omap_inner_key_le_t &) = default;
Transaction& t, extent_len_t len) {
assert(len % ALIGNMENT == 0);
auto r = ceph::buffer::create_aligned(len, ALIGNMENT);
- auto addr = reinterpret_cast<laddr_t>(r->get_data());
+ auto addr = laddr_t(reinterpret_cast<laddr_t::Unsigned>(r->get_data()));
auto bp = ceph::bufferptr(std::move(r));
auto extent = Ref<DummyNodeExtent>(new DummyNodeExtent(std::move(bp)));
extent->set_laddr(addr);
// FIXME: It is possible that PGs from different pools share the same prefix
// if the mask 0xFF is not long enough, result in unexpected transaction
// conflicts.
- return ((uint64_t)(shard & 0XFF)<<56 |
- (uint64_t)(pool & 0xFF)<<48 |
- (uint64_t)(crush )<<16);
+ return laddr_t((uint64_t)(shard & 0xFF)<<56 |
+ (uint64_t)(pool & 0xFF)<<48 |
+ (uint64_t)(crush )<<16);
}
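For reference, the hint built above packs the shard into bits 56..63, the pool into bits 48..55, and the 32-bit crush hash into bits 16..47, leaving the low 16 bits zero. A standalone mirror of that packing (make_hint_demo and its signature are illustrative, not a function in the tree):

laddr_t make_hint_demo(int shard, int64_t pool, uint32_t crush) {
  return laddr_t((uint64_t)(shard & 0xFF) << 56 |
                 (uint64_t)(pool & 0xFF) << 48 |
                 (uint64_t)crush << 16);
}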
struct node_offset_packed_t {
#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
#define NXT_T(StageType) staged<typename StageType::next_param_t>
- laddr_t i_value{0};
+ laddr_t i_value = L_ADDR_MIN;
auto insert_size_2 =
_STAGE_T(InternalNode0)::insert_size(key, i_value);
auto insert_size_0 =
}
}
+std::ostream &operator<<(std::ostream &out, const laddr_t &laddr) {
+ return out << 'L' << std::hex << laddr.value << std::dec;
+}
+
std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr)
{
if (pladdr.is_laddr()) {
std::ostream &operator<<(std::ostream &out, const segment_id_t&);
// ondisk type of segment_id_t
-struct __attribute((packed)) segment_id_le_t {
+struct __attribute__((packed)) segment_id_le_t {
ceph_le32 segment = ceph_le32(segment_id_t().segment);
segment_id_le_t(const segment_id_t id) :
return as_res_paddr().block_relative_to(rhs.as_res_paddr());
}
-struct __attribute((packed)) paddr_le_t {
+struct __attribute__((packed)) paddr_le_t {
ceph_le64 internal_paddr =
ceph_le64(P_ADDR_NULL.internal_paddr);
constexpr journal_seq_t JOURNAL_SEQ_NULL = JOURNAL_SEQ_MAX;
// logical addr, see LBAManager, TransactionManager
-using laddr_t = uint64_t;
-constexpr laddr_t L_ADDR_MIN = std::numeric_limits<laddr_t>::min();
-constexpr laddr_t L_ADDR_MAX = std::numeric_limits<laddr_t>::max();
+class laddr_t {
+public:
+ // the type of underlying integer
+ using Unsigned = uint64_t;
+ static constexpr Unsigned RAW_VALUE_MAX =
+ std::numeric_limits<Unsigned>::max();
+
+ constexpr laddr_t() : laddr_t(RAW_VALUE_MAX) {}
+ constexpr explicit laddr_t(Unsigned value) : value(value) {}
+
+ bool is_aligned(Unsigned alignment) const {
+ assert(alignment != 0);
+ assert((alignment & (alignment - 1)) == 0);
+ return value == p2align(value, alignment);
+ }
+
+ laddr_t get_aligned_laddr(Unsigned alignment) const {
+ assert(alignment != 0);
+ assert((alignment & (alignment - 1)) == 0);
+ return laddr_t(p2align(value, alignment));
+ }
+
+ laddr_t get_roundup_laddr(Unsigned alignment) const {
+ assert(alignment != 0);
+ assert((alignment & (alignment - 1)) == 0);
+ return laddr_t(p2roundup(value, alignment));
+ }
+
+  /// laddr_t works like a primitive integer type; encode/decode it manually
+ void encode(::ceph::buffer::list::contiguous_appender& p) const {
+ p.append(reinterpret_cast<const char *>(&value), sizeof(Unsigned));
+ }
+ void bound_encode(size_t& p) const {
+ p += sizeof(Unsigned);
+ }
+ void decode(::ceph::buffer::ptr::const_iterator& p) {
+ assert(static_cast<std::size_t>(p.get_end() - p.get_pos()) >= sizeof(Unsigned));
+ memcpy((char *)&value, p.get_pos_add(sizeof(Unsigned)), sizeof(Unsigned));
+ }
+
+ friend std::ostream &operator<<(std::ostream &, const laddr_t &);
+
+ friend auto operator<=>(const laddr_t&, const laddr_t&) = default;
+
+ friend laddr_t operator+(const laddr_t &laddr, const Unsigned &i) {
+ return laddr_t{laddr.value + i};
+ }
+
+ friend laddr_t operator+(const Unsigned &i, const laddr_t &laddr) {
+ return laddr_t{laddr.value + i};
+ }
+
+ friend laddr_t operator-(const laddr_t &laddr, const Unsigned &i) {
+ return laddr_t{laddr.value - i};
+ }
+
+ friend Unsigned operator-(const laddr_t &l, const laddr_t &r) {
+ return l.value - r.value;
+ }
+
+ friend struct laddr_le_t;
+ friend struct pladdr_le_t;
+
+private:
+ Unsigned value;
+};
+
+constexpr laddr_t L_ADDR_MAX = laddr_t(laddr_t::RAW_VALUE_MAX);
+constexpr laddr_t L_ADDR_MIN = laddr_t(0);
constexpr laddr_t L_ADDR_NULL = L_ADDR_MAX;
-constexpr laddr_t L_ADDR_ROOT = L_ADDR_MAX - 1;
-constexpr laddr_t L_ADDR_LBAT = L_ADDR_MAX - 2;
+constexpr laddr_t L_ADDR_ROOT = laddr_t(laddr_t::RAW_VALUE_MAX - 1);
+constexpr laddr_t L_ADDR_LBAT = laddr_t(laddr_t::RAW_VALUE_MAX - 2);
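A short sketch of the strong type's semantics (the demo function is hypothetical; assumes building within the ceph tree): an address offset by an integer stays an address, the difference of two addresses is a plain integer, and the alignment helpers replace the raw p2align/p2roundup calls seen in the hunks above.

#include <cassert>
#include <sstream>
#include "crimson/os/seastore/seastore_types.h"

using namespace crimson::os::seastore;

void laddr_semantics_demo() {
  const laddr_t base(0x2000);
  assert(base.is_aligned(4096));
  assert(laddr_t(0x2001).get_aligned_laddr(4096) == base);  // round down
  assert(laddr_t(0x1fff).get_roundup_laddr(4096) == base);  // round up

  laddr_t end = base + 0x1000;   // laddr_t + Unsigned -> laddr_t
  assert(end - base == 0x1000);  // laddr_t - laddr_t -> Unsigned
  assert(end > base);            // ordering via the defaulted operator<=>

  std::ostringstream os;
  os << end;                     // the new operator<< prints 'L' + hex value
  assert(os.str() == "L3000");
}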
-struct __attribute((packed)) laddr_le_t {
- ceph_le64 laddr = ceph_le64(L_ADDR_NULL);
+struct __attribute__((packed)) laddr_le_t {
+ ceph_le64 laddr;
using orig_type = laddr_t;
- laddr_le_t() = default;
+ laddr_le_t() : laddr_le_t(L_ADDR_NULL) {}
laddr_le_t(const laddr_le_t &) = default;
explicit laddr_le_t(const laddr_t &addr)
- : laddr(ceph_le64(addr)) {}
+ : laddr(addr.value) {}
operator laddr_t() const {
return laddr_t(laddr);
}
laddr_le_t& operator=(laddr_t addr) {
ceph_le64 val;
- val = addr;
+ val = addr.value;
laddr = val;
return *this;
}
+
+ bool operator==(const laddr_le_t&) const = default;
};
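Round-tripping through the on-disk little-endian form behaves as before; a small check using the same includes as the sketch above, and assuming only what the struct defines:

void laddr_le_roundtrip_demo() {
  laddr_le_t le(L_ADDR_ROOT);   // explicit construction from laddr_t
  laddr_t back = le;            // implicit conversion back
  assert(back == L_ADDR_ROOT);
  assert(laddr_le_t() == laddr_le_t(L_ADDR_NULL));  // default is L_ADDR_NULL
}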
constexpr uint64_t PL_ADDR_NULL = std::numeric_limits<uint64_t>::max();
MAX=2 // or NONE
};
-struct __attribute((packed)) pladdr_le_t {
+struct __attribute__((packed)) pladdr_le_t {
ceph_le64 pladdr = ceph_le64(PL_ADDR_NULL);
addr_type_t addr_type = addr_type_t::MAX;
: pladdr(
ceph_le64(
addr.is_laddr() ?
- std::get<0>(addr.pladdr) :
+ std::get<0>(addr.pladdr).value :
std::get<1>(addr.pladdr).internal_paddr)),
addr_type(
addr.is_laddr() ?
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::seastore_meta_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_id_t)
+WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::laddr_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::paddr_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal_seq_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::delta_info_t)
template <> struct fmt::formatter<crimson::os::seastore::extent_types_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::journal_seq_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::journal_tail_delta_t> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::laddr_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::laddr_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::omap_root_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::paddr_list_t> : fmt::ostream_formatter {};
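With the ostream_formatter registration above, laddr_t can be used directly in the fmt-based logging macros seen in the hunks below; a hypothetical one-liner:

auto s = fmt::format("laddr_hint={}", laddr_t(0x3000));  // "laddr_hint=L3000"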
LOG_PREFIX(TransactionManager::alloc_non_data_extent);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
t, T::TYPE, len, placement_hint, laddr_hint);
- ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
+ ceph_assert(laddr_hint.is_aligned(epm->get_block_size()));
auto ext = cache->alloc_new_non_data_extent<T>(
t,
len,
LOG_PREFIX(TransactionManager::alloc_data_extents);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
t, T::TYPE, len, placement_hint, laddr_hint);
- ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
+ ceph_assert(laddr_hint.is_aligned(epm->get_block_size()));
auto exts = cache->alloc_new_data_extents<T>(
t,
len,
extent_len_t len) {
LOG_PREFIX(TransactionManager::reserve_region);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}", t, len, hint);
- ceph_assert(is_aligned(hint, epm->get_block_size()));
+ ceph_assert(hint.is_aligned(epm->get_block_size()));
return lba_manager->reserve_region(
t,
hint,
LOG_PREFIX(TransactionManager::clone_pin);
SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}, clone_offset {}",
t, mapping.get_length(), hint, intermediate_key);
- ceph_assert(is_aligned(hint, epm->get_block_size()));
+ ceph_assert(hint.is_aligned(epm->get_block_size()));
return lba_manager->clone_mapping(
t,
hint,
"write",
[this, offset, &ptr](auto& t)
{
- return tm->remove(t, offset
+ return tm->remove(t, laddr_t(offset)
).discard_result().handle_error_interruptible(
crimson::ct_error::enoent::handle([](auto) { return seastar::now(); }),
crimson::ct_error::pass_further_all{}
).si_then([this, offset, &t, &ptr] {
logger().debug("dec_ref complete");
- return tm->alloc_data_extents<TestBlock>(t, offset, ptr.length());
+ return tm->alloc_data_extents<TestBlock>(t, laddr_t(offset), ptr.length());
}).si_then([this, offset, &t, &ptr](auto extents) mutable {
boost::ignore_unused(offset); // avoid clang warning;
auto off = offset;
auto left = ptr.length();
size_t written = 0;
for (auto &ext : extents) {
- assert(ext->get_laddr() == (size_t)off);
+ assert(ext->get_laddr() == laddr_t(off));
assert(ext->get_bptr().length() <= left);
ptr.copy_out(written, ext->get_length(), ext->get_bptr().c_str());
off += ext->get_length();
"read",
[=, &blret, this](auto& t)
{
- return read_extents(t, offset, size
+ return read_extents(t, laddr_t(offset), size
).si_then([=, &blret](auto ext_list) {
- size_t cur = offset;
+ laddr_t cur(offset);
for (auto &i: ext_list) {
if (cur != i.first) {
assert(cur < i.first);
void initialize(Transaction& t, Onode& value) const {
auto &ftvalue = static_cast<FLTreeOnode&>(value);
ftvalue.update_onode_size(t, size);
- auto oroot = omap_root_t(id, cnt_modify,
+ auto oroot = omap_root_t(laddr_t(id), cnt_modify,
value.get_metadata_hint(block_size));
ftvalue.update_omap_root(t, oroot);
validate(value);
void validate(Onode& value) const {
auto& layout = value.get_layout();
ceph_assert(laddr_t(layout.size) == laddr_t{size});
- ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).addr == id);
+ ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).addr == laddr_t(id));
ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).depth == cnt_modify);
}
value.payload_size = 8;
#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
#define NXT_T(StageType) staged<typename StageType::next_param_t>
- laddr_t i_value{0};
+ laddr_t i_value = L_ADDR_MIN;
logger().info("\n"
"Bytes of a key-value insertion (full-string):\n"
" s-p-c, 'n'-'o', s-g => value_payload(8): typically internal 43B, leaf 59B\n"
static Ref<DummyChild> create_new(
const std::set<ghobject_t>& keys, bool is_level_tail, DummyChildPool& pool) {
- static laddr_t seed = 0;
- return create(keys, is_level_tail, seed++, pool);
+ static uint64_t seed = 0;
+ return create(keys, is_level_tail, laddr_t(seed++), pool);
}
static eagain_ifuture<Ref<DummyChild>> create_initial(
run_async([this] {
constexpr unsigned total = 16<<10;
for (unsigned i = 0; i < total; i += 16) {
- insert(i, 8);
+ insert(laddr_t(i), 8);
}
for (unsigned i = 0; i < total; i += 16) {
- check_lower_bound(i);
- check_lower_bound(i + 4);
- check_lower_bound(i + 8);
- check_lower_bound(i + 12);
+ check_lower_bound(laddr_t(i));
+ check_lower_bound(laddr_t(i + 4));
+ check_lower_bound(laddr_t(i + 8));
+ check_lower_bound(laddr_t(i + 12));
}
});
}
[=, &t, this](auto &) {
return lba_manager->scan_mappings(
*t.t,
- 0,
+ L_ADDR_MIN,
L_ADDR_MAX,
[iter=t.mappings.begin(), &t](auto l, auto p, auto len) mutable {
EXPECT_NE(iter, t.mappings.end());
TEST_F(btree_lba_manager_test, basic)
{
run_async([this] {
- laddr_t laddr = 0x12345678 * block_size;
+ laddr_t laddr = laddr_t(0x12345678 * block_size);
{
// write initial mapping
auto t = create_transaction();
auto t = create_transaction();
logger().debug("opened transaction");
for (unsigned j = 0; j < 5; ++j) {
- alloc_mappings(t, 0, block_size);
+ alloc_mappings(t, L_ADDR_MIN, block_size);
if ((i % 10 == 0) && (j == 3)) {
check_mappings(t);
check_mappings();
auto t = create_transaction();
logger().debug("opened transaction");
for (unsigned j = 0; j < 5; ++j) {
- auto rets = alloc_mappings(t, 0, block_size);
+ auto rets = alloc_mappings(t, L_ADDR_MIN, block_size);
// just to speed things up a bit
if ((i % 100 == 0) && (j == 3)) {
check_mappings(t);
{
auto t = create_transaction();
for (unsigned i = 0; i < 400; ++i) {
- alloc_mappings(t, 0, block_size);
+ alloc_mappings(t, L_ADDR_MIN, block_size);
}
check_mappings(t);
submit_test_transaction(std::move(t));
{
auto t = create_transaction();
for (unsigned i = 0; i < 600; ++i) {
- alloc_mappings(t, 0, block_size);
+ alloc_mappings(t, L_ADDR_MIN, block_size);
}
auto addresses = get_mapped_addresses(t);
for (unsigned i = 0; i != addresses.size(); ++i) {
}
};
iterate([&](auto &t, auto idx) {
- alloc_mappings(t, idx * block_size, block_size);
+ alloc_mappings(t, laddr_t(idx * block_size), block_size);
});
check_mappings();
iterate([&](auto &t, auto idx) {
if ((idx % 32) > 0) {
- decref_mapping(t, idx * block_size);
+ decref_mapping(t, laddr_t(idx * block_size));
}
});
check_mappings();
iterate([&](auto &t, auto idx) {
if ((idx % 32) > 0) {
- alloc_mappings(t, idx * block_size, block_size);
+ alloc_mappings(t, laddr_t(idx * block_size), block_size);
}
});
check_mappings();
iterate([&](auto &t, auto idx) {
- decref_mapping(t, idx * block_size);
+ decref_mapping(t, laddr_t(idx * block_size));
});
check_mappings();
});
objaddr_t offset,
extent_len_t length) {
auto ret = with_trans_intr(t, [&](auto &t) {
- return tm->get_pins(t, offset, length);
+ return tm->get_pins(t, laddr_t(offset), length);
}).unsafe_get();
return ret;
}
std::list<LBAMappingRef> get_mappings(objaddr_t offset, extent_len_t length) {
auto t = create_mutate_transaction();
auto ret = with_trans_intr(*t, [&](auto &t) {
- return tm->get_pins(t, offset, length);
+ return tm->get_pins(t, laddr_t(offset), length);
}).unsafe_get();
return ret;
}
"seastore_max_data_allocation_size", "8192").get();
}
- laddr_t get_random_laddr(size_t block_size, laddr_t limit) {
+ objaddr_t get_random_write_offset(size_t block_size, objaddr_t limit) {
return block_size *
std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen);
}
for (unsigned j = 0; j < 100; ++j) {
auto t = create_mutate_transaction();
for (unsigned k = 0; k < 2; ++k) {
- write(*t, get_random_laddr(BSIZE, TOTAL), wsize,
+ write(*t, get_random_write_offset(BSIZE, TOTAL), wsize,
(char)((j*k) % std::numeric_limits<char>::max()));
}
submit_transaction(std::move(t));
auto pins = get_mappings(*t, base, len);
assert(pins.size() == 1);
auto pin1 = remap_pin(*t, std::move(pins.front()), 4096, 8192);
- auto ext = get_extent(*t, base + 4096, 4096 * 2);
+ auto ext = get_extent(*t, laddr_t(base + 4096), 4096 * 2);
ASSERT_TRUE(ext->is_exist_clean());
write(*t, base + 4096, 4096, 'y');
ASSERT_TRUE(ext->is_exist_mutation_pending());
: TMTestState(num_main_devices, num_cold_devices), gen(rd()) {
}
- laddr_t get_random_laddr(size_t block_size, laddr_t limit) {
- return block_size *
- std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen);
+ laddr_t get_random_laddr(size_t block_size, size_t limit) {
+ return laddr_t(block_size *
+ std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen));
}
char get_random_contents() {
[this, &overlay](auto &t) {
return lba_manager->scan_mappings(
t,
- 0,
+ L_ADDR_MIN,
L_ADDR_MAX,
[iter=overlay.begin(), &overlay](auto l, auto p, auto len) mutable {
EXPECT_NE(iter, overlay.end());
auto t = create_transaction();
auto extent = alloc_extent(
t,
- i * BSIZE,
+ laddr_t(i * BSIZE),
BSIZE);
- ASSERT_EQ(i * BSIZE, extent->get_laddr());
+ ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr());
submit_transaction(std::move(t));
}
boost::make_counting_iterator(0lu),
boost::make_counting_iterator(BLOCKS),
[this, &t](auto i) {
- return tm->read_extent<TestBlock>(t, i * BSIZE, BSIZE
+ return tm->read_extent<TestBlock>(t, laddr_t(i * BSIZE), BSIZE
).si_then([](auto) {
return seastar::now();
});
auto t = create_transaction();
auto extent = alloc_extent(
t,
- i * BSIZE,
+ laddr_t(i * BSIZE),
BSIZE);
- ASSERT_EQ(i * BSIZE, extent->get_laddr());
+ ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr());
if (try_submit_transaction(std::move(t)))
break;
}
void test_remap_pin() {
run_async([this] {
disable_max_extent_size();
- constexpr size_t l_offset = 32 << 10;
+ constexpr laddr_t l_offset = laddr_t(32 << 10);
constexpr size_t l_len = 32 << 10;
- constexpr size_t r_offset = 64 << 10;
+ constexpr laddr_t r_offset = laddr_t(64 << 10);
constexpr size_t r_len = 32 << 10;
{
auto t = create_transaction();
void test_clone_and_remap_pin() {
run_async([this] {
disable_max_extent_size();
- constexpr size_t l_offset = 32 << 10;
+ constexpr laddr_t l_offset = laddr_t(32 << 10);
constexpr size_t l_len = 32 << 10;
- constexpr size_t r_offset = 64 << 10;
+ constexpr laddr_t r_offset = laddr_t(64 << 10);
constexpr size_t r_len = 32 << 10;
- constexpr size_t l_clone_offset = 96 << 10;
- constexpr size_t r_clone_offset = 128 << 10;
+ constexpr laddr_t l_clone_offset = laddr_t(96 << 10);
+ constexpr laddr_t r_clone_offset = laddr_t(128 << 10);
{
auto t = create_transaction();
auto lext = alloc_extent(t, l_offset, l_len);
void test_overwrite_pin() {
run_async([this] {
disable_max_extent_size();
- constexpr size_t m_offset = 8 << 10;
+ constexpr laddr_t m_offset = laddr_t(8 << 10);
constexpr size_t m_len = 56 << 10;
- constexpr size_t l_offset = 64 << 10;
+ constexpr laddr_t l_offset = laddr_t(64 << 10);
constexpr size_t l_len = 64 << 10;
- constexpr size_t r_offset = 128 << 10;
+ constexpr laddr_t r_offset = laddr_t(128 << 10);
constexpr size_t r_len = 64 << 10;
{
auto t = create_transaction();
run_async([this] {
disable_max_extent_size();
constexpr unsigned REMAP_NUM = 32;
- constexpr size_t offset = 0;
+ constexpr laddr_t offset = L_ADDR_MIN;
constexpr size_t length = 256 << 10;
{
auto t = create_transaction();
if (off == 0 || off >= 255) {
continue;
}
- auto new_off = (off << 10) - last_pin->get_key();
+ auto new_off = laddr_t(off << 10) - last_pin->get_key();
auto new_len = last_pin->get_length() - new_off;
//always remap right extent at new split_point
auto pin = remap_pin(t, std::move(last_pin), new_off, new_len);
run_async([this] {
disable_max_extent_size();
constexpr unsigned REMAP_NUM = 32;
- constexpr size_t offset = 0;
+ constexpr laddr_t offset = L_ADDR_MIN;
constexpr size_t length = 256 << 10;
{
auto t = create_transaction();
auto end_off = split_points.front();
split_points.pop_front();
ASSERT_TRUE(start_off <= end_off);
- if (((end_off << 10) == pin0->get_key() + pin0->get_length())
+ if ((laddr_t(end_off << 10) == pin0->get_key() + pin0->get_length())
|| (start_off == end_off)) {
if (split_points.empty() && empty_transaction) {
early_exit++;
continue;
}
empty_transaction = false;
- auto new_off = (start_off << 10) - last_rpin->get_key();
+ auto new_off = laddr_t(start_off << 10) - last_rpin->get_key();
auto new_len = (end_off - start_off) << 10;
bufferlist bl;
bl.append(ceph::bufferptr(ceph::buffer::create(new_len, 0)));
TEST_P(tm_random_block_device_test_t, scatter_allocation)
{
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * 4096;
+ constexpr laddr_t ADDR = laddr_t(0xFF * 4096);
epm->prefill_fragmented_devices();
auto t = create_transaction();
for (int i = 0; i < 1991; i++) {
TEST_P(tm_single_device_test_t, basic)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * SIZE;
+ constexpr laddr_t ADDR = laddr_t(0xFF * SIZE);
{
auto t = create_transaction();
auto extent = alloc_extent(
TEST_P(tm_single_device_test_t, mutate)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * SIZE;
+ constexpr laddr_t ADDR = laddr_t(0xFF * SIZE);
{
auto t = create_transaction();
auto extent = alloc_extent(
TEST_P(tm_single_device_test_t, allocate_lba_conflict)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * SIZE;
- constexpr laddr_t ADDR2 = 0xFE * SIZE;
+ constexpr laddr_t ADDR = laddr_t(0xFF * SIZE);
+ constexpr laddr_t ADDR2 = laddr_t(0xFE * SIZE);
auto t = create_transaction();
auto t2 = create_transaction();
TEST_P(tm_single_device_test_t, mutate_lba_conflict)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
{
auto t = create_transaction();
check();
}
- constexpr laddr_t ADDR = 150 * SIZE;
+ constexpr laddr_t ADDR = laddr_t(150 * SIZE);
{
auto t = create_transaction();
auto t2 = create_transaction();
TEST_P(tm_single_device_test_t, concurrent_mutate_lba_no_conflict)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
constexpr size_t NUM = 500;
- constexpr laddr_t addr = 0;
- constexpr laddr_t addr2 = SIZE * (NUM - 1);
- run_async([this] {
+ constexpr laddr_t addr = L_ADDR_MIN;
+ constexpr laddr_t addr2 = laddr_t(SIZE * (NUM - 1));
+ run_async([this, addr, addr2] {
{
auto t = create_transaction();
for (unsigned i = 0; i < NUM; ++i) {
TEST_P(tm_single_device_test_t, create_remove_same_transaction)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * SIZE;
+ constexpr laddr_t ADDR = laddr_t(0xFF * SIZE);
{
auto t = create_transaction();
auto extent = alloc_extent(
TEST_P(tm_single_device_test_t, split_merge_read_same_transaction)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
{
auto t = create_transaction();
TEST_P(tm_single_device_test_t, inc_dec_ref)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
- constexpr laddr_t ADDR = 0xFF * SIZE;
+ constexpr laddr_t ADDR = laddr_t(0xFF * SIZE);
{
auto t = create_transaction();
auto extent = alloc_extent(
TEST_P(tm_single_device_test_t, cause_lba_split)
{
- constexpr laddr_t SIZE = 4096;
+ constexpr size_t SIZE = 4096;
run_async([this] {
for (unsigned i = 0; i < 200; ++i) {
auto t = create_transaction();
auto extent = alloc_extent(
t,
- i * SIZE,
+ laddr_t(i * SIZE),
SIZE,
(char)(i & 0xFF));
- ASSERT_EQ(i * SIZE, extent->get_laddr());
+ ASSERT_EQ(laddr_t(i * SIZE), extent->get_laddr());
submit_transaction(std::move(t));
}
check();
auto t = create_transaction();
auto extent = alloc_extent(
t,
- i * BSIZE,
+ laddr_t(i * BSIZE),
BSIZE);
- ASSERT_EQ(i * BSIZE, extent->get_laddr());
+ ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr());
submit_transaction(std::move(t));
}
// pad out transaction
auto paddings = alloc_extents(
t,
- TOTAL + (k * PADDING_SIZE),
+ laddr_t(TOTAL + (k * PADDING_SIZE)),
PADDING_SIZE);
for (auto &padding : paddings) {
dec_ref(t, padding->get_laddr());
TEST_P(tm_single_device_intergrity_check_test_t, remap_lazy_read)
{
- constexpr laddr_t offset = 0;
+ constexpr laddr_t offset = L_ADDR_MIN;
constexpr size_t length = 256 << 10;
run_async([this, offset] {
disable_max_extent_size();
for (int i = 0; i < LEAF_NODE_CAPACITY; i++) {
auto extent = alloc_extent(
t,
- i * 4096,
+ laddr_t(i * 4096),
4096,
'a');
}
{
auto t = create_transaction();
- auto pin = get_pin(t, (LEAF_NODE_CAPACITY - 1) * 4096);
+ auto pin = get_pin(t, laddr_t((LEAF_NODE_CAPACITY - 1) * 4096));
assert(pin->is_parent_viewable());
- auto extent = alloc_extent(t, LEAF_NODE_CAPACITY * 4096, 4096, 'a');
+ auto extent = alloc_extent(t, laddr_t(LEAF_NODE_CAPACITY * 4096), 4096, 'a');
assert(!pin->is_parent_viewable());
- pin = get_pin(t, LEAF_NODE_CAPACITY * 4096);
- std::ignore = alloc_extent(t, (LEAF_NODE_CAPACITY + 1) * 4096, 4096, 'a');
+ pin = get_pin(t, laddr_t(LEAF_NODE_CAPACITY * 4096));
+ std::ignore = alloc_extent(t, laddr_t((LEAF_NODE_CAPACITY + 1) * 4096), 4096, 'a');
assert(pin->is_parent_viewable());
assert(pin->parent_modified());
pin->maybe_fix_pos();