From: Zhang Song Date: Fri, 23 Aug 2024 07:29:08 +0000 (+0800) Subject: crimson/os/seastore: turn laddr_t into a struct X-Git-Tag: v20.0.0~1167^2~9 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=fb2358c0a31805960ed17985b02e1f3679641952;p=ceph.git crimson/os/seastore: turn laddr_t into a struct Signed-off-by: Zhang Song --- diff --git a/src/crimson/os/seastore/backref/backref_tree_node.h b/src/crimson/os/seastore/backref/backref_tree_node.h index c3ff52520ce..3f3c1a2f5c3 100644 --- a/src/crimson/os/seastore/backref/backref_tree_node.h +++ b/src/crimson/os/seastore/backref/backref_tree_node.h @@ -17,7 +17,7 @@ using BackrefNode = FixedKVNode; struct backref_map_val_t { extent_len_t len = 0; ///< length of extents - laddr_t laddr = 0; ///< logical address of extents + laddr_t laddr = L_ADDR_MIN; ///< logical address of extents extent_types_t type = extent_types_t::ROOT; backref_map_val_t() = default; @@ -36,7 +36,7 @@ std::ostream& operator<<(std::ostream &out, const backref_map_val_t& val); struct backref_map_val_le_t { extent_len_le_t len = init_extent_len_le(0); - laddr_le_t laddr = laddr_le_t(0); + laddr_le_t laddr = laddr_le_t(L_ADDR_MIN); extent_types_le_t type = 0; backref_map_val_le_t() = default; diff --git a/src/crimson/os/seastore/laddr_interval_set.h b/src/crimson/os/seastore/laddr_interval_set.h new file mode 100644 index 00000000000..dd83cae5bbb --- /dev/null +++ b/src/crimson/os/seastore/laddr_interval_set.h @@ -0,0 +1,758 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#pragma once + +#include "crimson/os/seastore/seastore_types.h" + +namespace crimson::os::seastore { +namespace details { + +// this interval_set structure is copied from include/interval_set.h to allow +// use the different type for length as the laddr_t becomes struct, and avoid +// changing the behaviors of other components. +// +// The latest commit is 58860ce3f60489d258aaa10fd783e68083261937 + +template class C = std::map> +class interval_set { + public: + using Map = C; + using value_type = typename Map::value_type; + using offset_type = T; + using length_type = L; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename Map::size_type; + + class const_iterator; + + class iterator + { + public: + using difference_type = ssize_t; + using value_type = typename Map::value_type; + using pointer = typename Map::value_type*; + using reference = typename Map::value_type&; + using iterator_category = std::forward_iterator_tag; + + explicit iterator(typename Map::iterator iter) + : _iter(iter) + { } + + // For the copy constructor and assignment operator, the compiler-generated functions, which + // perform simple bitwise copying, should be fine. + + bool operator==(const iterator& rhs) const { + return (_iter == rhs._iter); + } + + bool operator!=(const iterator& rhs) const { + return (_iter != rhs._iter); + } + + // Dereference this iterator to get a pair. + reference operator*() const { + return *_iter; + } + + // Return the interval start. + offset_type get_start() const { + return _iter->first; + } + + // Return the interval length. + length_type get_len() const { + return _iter->second; + } + + offset_type get_end() const { + return _iter->first + _iter->second; + } + + // Set the interval length. 
+ void set_len(const length_type& len) { + _iter->second = len; + } + + // Preincrement + iterator& operator++() + { + ++_iter; + return *this; + } + + // Postincrement + iterator operator++(int) + { + iterator prev(_iter); + ++_iter; + return prev; + } + + // Predecrement + iterator& operator--() + { + --_iter; + return *this; + } + + // Postdecrement + iterator operator--(int) + { + iterator prev(_iter); + --_iter; + return prev; + } + + friend class interval_set::const_iterator; + + protected: + typename Map::iterator _iter; + friend class interval_set; + }; + + class const_iterator + { + public: + using difference_type = ssize_t; + using value_type = const typename Map::value_type; + using pointer = const typename Map::value_type*; + using reference = const typename Map::value_type&; + using iterator_category = std::forward_iterator_tag; + + explicit const_iterator(typename Map::const_iterator iter) + : _iter(iter) + { } + + const_iterator(const iterator &i) + : _iter(i._iter) + { } + + // For the copy constructor and assignment operator, the compiler-generated functions, which + // perform simple bitwise copying, should be fine. + + bool operator==(const const_iterator& rhs) const { + return (_iter == rhs._iter); + } + + bool operator!=(const const_iterator& rhs) const { + return (_iter != rhs._iter); + } + + // Dereference this iterator to get a pair. + reference operator*() const { + return *_iter; + } + + // Return the interval start. + offset_type get_start() const { + return _iter->first; + } + offset_type get_end() const { + return _iter->first + _iter->second; + } + + // Return the interval length. + length_type get_len() const { + return _iter->second; + } + + // Preincrement + const_iterator& operator++() + { + ++_iter; + return *this; + } + + // Postincrement + const_iterator operator++(int) + { + const_iterator prev(_iter); + ++_iter; + return prev; + } + + // Predecrement + iterator& operator--() + { + --_iter; + return *this; + } + + // Postdecrement + iterator operator--(int) + { + iterator prev(_iter); + --_iter; + return prev; + } + + protected: + typename Map::const_iterator _iter; + }; + + interval_set() = default; + interval_set(Map&& other) { + m.swap(other); + for (const auto& p : m) { + _size += p.second; + } + } + + size_type num_intervals() const + { + return m.size(); + } + + iterator begin() { + return iterator(m.begin()); + } + + iterator lower_bound(T start) { + return iterator(find_inc_m(start)); + } + + iterator end() { + return iterator(m.end()); + } + + const_iterator begin() const { + return const_iterator(m.begin()); + } + + const_iterator lower_bound(T start) const { + return const_iterator(find_inc(start)); + } + + const_iterator end() const { + return const_iterator(m.end()); + } + + // helpers + private: + auto find_inc(T start) const { + auto p = m.lower_bound(start); // p->first >= start + if (p != m.begin() && + (p == m.end() || p->first > start)) { + --p; // might overlap? + if (p->first + p->second <= start) + ++p; // it doesn't. + } + return p; + } + + auto find_inc_m(T start) { + auto p = m.lower_bound(start); + if (p != m.begin() && + (p == m.end() || p->first > start)) { + --p; // might overlap? + if (p->first + p->second <= start) + ++p; // it doesn't. + } + return p; + } + + auto find_adj(T start) const { + auto p = m.lower_bound(start); + if (p != m.begin() && + (p == m.end() || p->first > start)) { + --p; // might touch? + if (p->first + p->second < start) + ++p; // it doesn't. 
+ } + return p; + } + + auto find_adj_m(T start) { + auto p = m.lower_bound(start); + if (p != m.begin() && + (p == m.end() || p->first > start)) { + --p; // might touch? + if (p->first + p->second < start) + ++p; // it doesn't. + } + return p; + } + + void intersection_size_asym(const interval_set &s, const interval_set &l) { + auto ps = s.m.begin(); + ceph_assert(ps != s.m.end()); + auto offset = ps->first; + bool first = true; + auto mi = m.begin(); + + while (1) { + if (first) + first = false; + auto pl = l.find_inc(offset); + if (pl == l.m.end()) + break; + while (ps != s.m.end() && ps->first + ps->second <= pl->first) + ++ps; + if (ps == s.m.end()) + break; + offset = pl->first + pl->second; + if (offset <= ps->first) { + offset = ps->first; + continue; + } + + if (*ps == *pl) { + do { + mi = m.insert(mi, *ps); + _size += ps->second; + ++ps; + ++pl; + } while (ps != s.m.end() && pl != l.m.end() && *ps == *pl); + if (ps == s.m.end()) + break; + offset = ps->first; + continue; + } + + auto start = std::max(ps->first, pl->first); + auto en = std::min(ps->first + ps->second, offset); + ceph_assert(en > start); + mi = m.emplace_hint(mi, start, en - start); + _size += mi->second; + if (ps->first + ps->second <= offset) { + ++ps; + if (ps == s.m.end()) + break; + offset = ps->first; + } + } + } + + bool subset_size_sym(const interval_set &b) const { + auto pa = m.begin(), pb = b.m.begin(); + const auto a_end = m.end(), b_end = b.m.end(); + + while (pa != a_end && pb != b_end) { + while (pb->first + pb->second <= pa->first) { + ++pb; + if (pb == b_end) + return false; + } + + if (*pa == *pb) { + do { + ++pa; + ++pb; + } while (pa != a_end && pb != b_end && *pa == *pb); + continue; + } + + // interval begins before other + if (pa->first < pb->first) + return false; + // interval is longer than other + if (pa->first + pa->second > pb->first + pb->second) + return false; + + ++pa; + } + + return pa == a_end; + } + + public: + bool operator==(const interval_set& other) const { + return _size == other._size && m == other.m; + } + + uint64_t size() const { + return _size; + } + + void bound_encode(size_t& p) const { + denc_traits::bound_encode(m, p); + } + void encode(ceph::buffer::list::contiguous_appender& p) const { + denc(m, p); + } + void decode(ceph::buffer::ptr::const_iterator& p) { + denc(m, p); + _size = 0; + for (const auto& p : m) { + _size += p.second; + } + } + void decode(ceph::buffer::list::iterator& p) { + denc(m, p); + _size = 0; + for (const auto& p : m) { + _size += p.second; + } + } + + void encode_nohead(ceph::buffer::list::contiguous_appender& p) const { + denc_traits::encode_nohead(m, p); + } + void decode_nohead(int n, ceph::buffer::ptr::const_iterator& p) { + denc_traits::decode_nohead(n, m, p); + _size = 0; + for (const auto& p : m) { + _size += p.second; + } + } + + void clear() { + m.clear(); + _size = 0; + } + + bool contains(T i, T *pstart=0, L *plen=0) const { + auto p = find_inc(i); + if (p == m.end()) return false; + if (p->first > i) return false; + if (p->first+p->second <= i) return false; + ceph_assert(p->first <= i && p->first+p->second > i); + if (pstart) + *pstart = p->first; + if (plen) + *plen = p->second; + return true; + } + bool contains(T start, L len) const { + auto p = find_inc(start); + if (p == m.end()) return false; + if (p->first > start) return false; + if (p->first+p->second <= start) return false; + ceph_assert(p->first <= start && p->first+p->second > start); + if (p->first+p->second < start+len) return false; + return true; + } + bool 
intersects(T start, L len) const { + interval_set a; + a.insert(start, len); + interval_set i; + i.intersection_of( *this, a ); + if (i.empty()) return false; + return true; + } + + // outer range of set + bool empty() const { + return m.empty(); + } + offset_type range_start() const { + ceph_assert(!empty()); + auto p = m.begin(); + return p->first; + } + offset_type range_end() const { + ceph_assert(!empty()); + auto p = m.rbegin(); + return p->first + p->second; + } + + // interval start after p (where p not in set) + bool starts_after(T i) const { + ceph_assert(!contains(i)); + auto p = find_inc(i); + if (p == m.end()) return false; + return true; + } + offset_type start_after(T i) const { + ceph_assert(!contains(i)); + auto p = find_inc(i); + return p->first; + } + + // interval end that contains start + offset_type end_after(T start) const { + ceph_assert(contains(start)); + auto p = find_inc(start); + return p->first+p->second; + } + + void insert(T val) { + insert(val, 1); + } + + void insert(T start, L len, T *pstart=0, L *plen=0) { + //cout << "insert " << start << "~" << len << endl; + ceph_assert(len > 0); + _size += len; + auto p = find_adj_m(start); + if (p == m.end()) { + m[start] = len; // new interval + if (pstart) + *pstart = start; + if (plen) + *plen = len; + } else { + if (p->first < start) { + + if (p->first + p->second != start) { + //cout << "p is " << p->first << "~" << p->second << ", start is " << start << ", len is " << len << endl; + ceph_abort(); + } + + p->second += len; // append to end + + auto n = p; + ++n; + if (pstart) + *pstart = p->first; + if (n != m.end() && + start+len == n->first) { // combine with next, too! + p->second += n->second; + if (plen) + *plen = p->second; + m.erase(n); + } else { + if (plen) + *plen = p->second; + } + } else { + if (start+len == p->first) { + if (pstart) + *pstart = start; + if (plen) + *plen = len + p->second; + L psecond = p->second; + m.erase(p); + m[start] = len + psecond; // append to front + } else { + ceph_assert(p->first > start+len); + if (pstart) + *pstart = start; + if (plen) + *plen = len; + m[start] = len; // new interval + } + } + } + } + + void swap(interval_set& other) { + m.swap(other.m); + std::swap(_size, other._size); + } + + void erase(const iterator &i) { + _size -= i.get_len(); + m.erase(i._iter); + } + + void erase(T val) { + erase(val, 1); + } + + void erase(T start, L len, + std::function claim = {}) { + auto p = find_inc_m(start); + + _size -= len; + + ceph_assert(p != m.end()); + ceph_assert(p->first <= start); + + L before = start - p->first; + ceph_assert(p->second >= before+len); + L after = p->second - before - len; + if (before) { + if (claim && claim(p->first, before)) { + _size -= before; + m.erase(p); + } else { + p->second = before; // shorten bit before + } + } else { + m.erase(p); + } + if (after) { + if (claim && claim(start + len, after)) { + _size -= after; + } else { + m[start + len] = after; + } + } + } + + void subtract(const interval_set &a) { + for (const auto& [start, len] : a.m) { + erase(start, len); + } + } + + void insert(const interval_set &a) { + for (const auto& [start, len] : a.m) { + insert(start, len); + } + } + + + void intersection_of(const interval_set &a, const interval_set &b) { + ceph_assert(&a != this); + ceph_assert(&b != this); + clear(); + + const interval_set *s, *l; + + if (a.size() < b.size()) { + s = &a; + l = &b; + } else { + s = &b; + l = &a; + } + + if (!s->size()) + return; + + /* + * Use the lower_bound algorithm for larger size ratios + * 
where it performs better, but not for smaller size + * ratios where sequential search performs better. + */ + if (l->size() / s->size() >= 10) { + intersection_size_asym(*s, *l); + return; + } + + auto pa = a.m.begin(); + auto pb = b.m.begin(); + auto mi = m.begin(); + + while (pa != a.m.end() && pb != b.m.end()) { + // passing? + if (pa->first + pa->second <= pb->first) + { pa++; continue; } + if (pb->first + pb->second <= pa->first) + { pb++; continue; } + + if (*pa == *pb) { + do { + mi = m.insert(mi, *pa); + _size += pa->second; + ++pa; + ++pb; + } while (pa != a.m.end() && pb != b.m.end() && *pa == *pb); + continue; + } + + T start = std::max(pa->first, pb->first); + T en = std::min(pa->first+pa->second, pb->first+pb->second); + ceph_assert(en > start); + mi = m.emplace_hint(mi, start, en - start); + _size += mi->second; + if (pa->first+pa->second > pb->first+pb->second) + pb++; + else + pa++; + } + } + void intersection_of(const interval_set& b) { + interval_set a; + swap(a); + intersection_of(a, b); + } + + void union_of(const interval_set &a, const interval_set &b) { + ceph_assert(&a != this); + ceph_assert(&b != this); + clear(); + + //cout << "union_of" << endl; + + // a + m = a.m; + _size = a._size; + + // - (a*b) + interval_set ab; + ab.intersection_of(a, b); + subtract(ab); + + // + b + insert(b); + return; + } + void union_of(const interval_set &b) { + interval_set a; + swap(a); + union_of(a, b); + } + void union_insert(T off, L len) { + interval_set a; + a.insert(off, len); + union_of(a); + } + + bool subset_of(const interval_set &big) const { + if (!size()) + return true; + if (size() > big.size()) + return false; + if (range_end() > big.range_end()) + return false; + + /* + * Use the lower_bound algorithm for larger size ratios + * where it performs better, but not for smaller size + * ratios where sequential search performs better. + */ + if (big.size() / size() < 10) + return subset_size_sym(big); + + for (const auto& [start, len] : m) { + if (!big.contains(start, len)) return false; + } + return true; + } + + /* + * build a subset of @other, starting at or after @start, and including + * @len worth of values, skipping holes. e.g., + * span_of([5~10,20~5], 8, 5) -> [8~2,20~3] + */ + void span_of(const interval_set &other, T start, L len) { + clear(); + auto p = other.find_inc(start); + if (p == other.m.end()) + return; + if (p->first < start) { + if (p->first + p->second < start) + return; + if (p->first + p->second < start + len) { + L howmuch = p->second - (start - p->first); + insert(start, howmuch); + len -= howmuch; + p++; + } else { + insert(start, len); + return; + } + } + while (p != other.m.end() && len > 0) { + if (p->second < len) { + insert(p->first, p->second); + len -= p->second; + p++; + } else { + insert(p->first, len); + return; + } + } + } + + /* + * Move contents of m into another Map. Use that instead of + * encoding interval_set into bufferlist then decoding it back into Map. 
+ */ + Map detach() && { + return std::move(m); + } + +private: + // data + uint64_t _size = 0; + Map m; // map start -> len +}; +} // namespace details +using laddr_interval_set_t = details::interval_set; +} // namespace crimson::os::seastore diff --git a/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc b/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc index bf0a8e3ec79..e7564ad61b5 100644 --- a/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc +++ b/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc @@ -431,7 +431,7 @@ BtreeLBAManager::_alloc_extents( return iter.next(c).si_then([&state, &alloc_info](auto it) { state.insert_iter = it; if (alloc_info.key == L_ADDR_NULL) { - state.last_end += alloc_info.len; + state.last_end = state.last_end + alloc_info.len; } }); }); diff --git a/src/crimson/os/seastore/object_data_handler.cc b/src/crimson/os/seastore/object_data_handler.cc index 9de32b89de1..8e6f06b79ee 100644 --- a/src/crimson/os/seastore/object_data_handler.cc +++ b/src/crimson/os/seastore/object_data_handler.cc @@ -7,6 +7,7 @@ #include "crimson/common/log.h" #include "crimson/os/seastore/object_data_handler.h" +#include "crimson/os/seastore/laddr_interval_set.h" namespace { seastar::logger& logger() { @@ -169,7 +170,7 @@ struct extent_to_remap_t { nullptr, new_offset, new_len, p->get_key(), p->get_length(), b); } - uint64_t laddr_start; + laddr_t laddr_start; extent_len_t length; std::optional bl; @@ -180,7 +181,7 @@ private: pin(std::move(pin)), new_offset(new_offset), new_len(new_len) {} extent_to_remap_t(type_t type, LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len, - uint64_t ori_laddr, extent_len_t ori_len, std::optional b) + laddr_t ori_laddr, extent_len_t ori_len, std::optional b) : type(type), pin(std::move(pin)), new_offset(new_offset), new_len(new_len), laddr_start(ori_laddr), length(ori_len), bl(b) {} @@ -294,7 +295,7 @@ overwrite_ops_t prepare_ops_list( } } - interval_set pre_alloc_addr_removed, pre_alloc_addr_remapped; + laddr_interval_set_t pre_alloc_addr_removed, pre_alloc_addr_remapped; if (delta_based_overwrite_max_extent_size) { for (auto &r : ops.to_remove) { if (r->is_data_stable() && !r->is_zero_reserved()) { @@ -321,7 +322,7 @@ overwrite_ops_t prepare_ops_list( erased_num = std::erase_if( ops.to_remove, [®ion, &to_remap](auto &r) { - interval_set range; + laddr_interval_set_t range; range.insert(r->get_key(), r->get_length()); if (range.contains(region.addr, region.len) && !r->is_clone()) { to_remap.push_back(extent_to_remap_t::create_overwrite( @@ -337,7 +338,7 @@ overwrite_ops_t prepare_ops_list( erased_num = std::erase_if( ops.to_remap, [®ion, &to_remap](auto &r) { - interval_set range; + laddr_interval_set_t range; range.insert(r.pin->get_key(), r.pin->get_length()); if (range.contains(region.addr, region.len) && !r.pin->is_clone()) { to_remap.push_back(extent_to_remap_t::create_overwrite( @@ -519,7 +520,7 @@ ObjectDataHandler::write_ret do_insertions( [ctx](auto ®ion) { LOG_PREFIX(object_data_handler.cc::do_insertions); if (region.is_data()) { - assert_aligned(region.addr); + ceph_assert(region.addr.is_aligned(ctx.tm.get_block_size())); assert_aligned(region.len); ceph_assert(region.len == region.bl->length()); DEBUGT("allocating extent: {}~{}", @@ -544,7 +545,7 @@ ObjectDataHandler::write_ret do_insertions( off); } iter.copy(extent->get_length(), extent->get_bptr().c_str()); - off += extent->get_length(); + off = off + extent->get_length(); left -= extent->get_length(); } return 
ObjectDataHandler::write_iertr::now(); @@ -707,8 +708,8 @@ public: right_paddr(pins.back()->get_val()), data_begin(offset), data_end(offset + len), - aligned_data_begin(p2align((uint64_t)data_begin, (uint64_t)block_size)), - aligned_data_end(p2roundup((uint64_t)data_end, (uint64_t)block_size)), + aligned_data_begin(data_begin.get_aligned_laddr(block_size)), + aligned_data_end(data_end.get_roundup_laddr(block_size)), left_operation(overwrite_operation_t::UNKNOWN), right_operation(overwrite_operation_t::UNKNOWN), block_size(block_size), @@ -725,10 +726,10 @@ public: private: // refer to overwrite_plan_t description void validate() const { - ceph_assert(pin_begin % block_size == 0); - ceph_assert(pin_end % block_size == 0); - ceph_assert(aligned_data_begin % block_size == 0); - ceph_assert(aligned_data_end % block_size == 0); + ceph_assert(pin_begin.is_aligned(block_size)); + ceph_assert(pin_end.is_aligned(block_size)); + ceph_assert(aligned_data_begin.is_aligned(block_size)); + ceph_assert(aligned_data_end.is_aligned(block_size)); ceph_assert(pin_begin <= aligned_data_begin); ceph_assert(aligned_data_begin <= data_begin); @@ -1190,8 +1191,8 @@ extent_to_write_list_t get_to_writes_with_zero_buffer( laddr_t offset, extent_len_t len, std::optional &&headptr, std::optional &&tailptr) { - auto zero_left = p2roundup(offset, (laddr_t)block_size); - auto zero_right = p2align(offset + len, (laddr_t)block_size); + auto zero_left = offset.get_roundup_laddr(block_size); + auto zero_right = (offset + len).get_aligned_laddr(block_size); auto left = headptr ? (offset - headptr->length()) : offset; auto right = tailptr ? (offset + len + tailptr->length()) : @@ -1207,8 +1208,8 @@ extent_to_write_list_t get_to_writes_with_zero_buffer( (!tailptr && (right == zero_right))); assert(right > left); - assert((left % block_size) == 0); - assert((right % block_size) == 0); + assert(left.is_aligned(block_size)); + assert(right.is_aligned(block_size)); // zero region too small for a reserved section, // headptr and tailptr in same extent @@ -1322,8 +1323,8 @@ ObjectDataHandler::write_ret ObjectDataHandler::overwrite( bufferlist write_bl; if (headptr) { write_bl.append(*headptr); - write_offset -= headptr->length(); - assert_aligned(write_offset); + write_offset = write_offset - headptr->length(); + ceph_assert(write_offset.is_aligned(ctx.tm.get_block_size())); } write_bl.claim_append(*bl); if (tailptr) { diff --git a/src/crimson/os/seastore/omap_manager/btree/omap_types.h b/src/crimson/os/seastore/omap_manager/btree/omap_types.h index 9e0d10e0358..089e59676e8 100644 --- a/src/crimson/os/seastore/omap_manager/btree/omap_types.h +++ b/src/crimson/os/seastore/omap_manager/btree/omap_types.h @@ -46,7 +46,7 @@ struct omap_node_meta_le_t { struct omap_inner_key_t { uint16_t key_off = 0; uint16_t key_len = 0; - laddr_t laddr = 0; + laddr_t laddr = L_ADDR_MIN; omap_inner_key_t() = default; omap_inner_key_t(uint16_t off, uint16_t len, laddr_t addr) @@ -70,7 +70,7 @@ struct omap_inner_key_t { struct omap_inner_key_le_t { ceph_le16 key_off{0}; ceph_le16 key_len{0}; - laddr_le_t laddr{0}; + laddr_le_t laddr{L_ADDR_MIN}; omap_inner_key_le_t() = default; omap_inner_key_le_t(const omap_inner_key_le_t &) = default; diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h b/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h index 04ff5477127..ab5971dfe5f 100644 --- a/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h +++ 
b/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h @@ -150,7 +150,7 @@ class DummyNodeExtentManager final: public NodeExtentManager { Transaction& t, extent_len_t len) { assert(len % ALIGNMENT == 0); auto r = ceph::buffer::create_aligned(len, ALIGNMENT); - auto addr = reinterpret_cast(r->get_data()); + auto addr = laddr_t(reinterpret_cast(r->get_data())); auto bp = ceph::bufferptr(std::move(r)); auto extent = Ref(new DummyNodeExtent(std::move(bp))); extent->set_laddr(addr); diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.h index fcd485355f5..18ebb039f89 100644 --- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.h +++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.h @@ -46,9 +46,9 @@ static laddr_t get_lba_hint(shard_t shard, pool_t pool, crush_hash_t crush) { // FIXME: It is possible that PGs from different pools share the same prefix // if the mask 0xFF is not long enough, result in unexpected transaction // conflicts. - return ((uint64_t)(shard & 0XFF)<<56 | - (uint64_t)(pool & 0xFF)<<48 | - (uint64_t)(crush )<<16); + return laddr_t((uint64_t)(shard & 0xFF)<<56 | + (uint64_t)(pool & 0xFF)<<48 | + (uint64_t)(crush )<<16); } struct node_offset_packed_t { diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/value.cc b/src/crimson/os/seastore/onode_manager/staged-fltree/value.cc index 694480d4ead..306f2b1bd7e 100644 --- a/src/crimson/os/seastore/onode_manager/staged-fltree/value.cc +++ b/src/crimson/os/seastore/onode_manager/staged-fltree/value.cc @@ -138,7 +138,7 @@ void validate_tree_config(const tree_conf_t& conf) #define _STAGE_T(NodeType) node_to_stage_t #define NXT_T(StageType) staged - laddr_t i_value{0}; + laddr_t i_value = L_ADDR_MIN; auto insert_size_2 = _STAGE_T(InternalNode0)::insert_size(key, i_value); auto insert_size_0 = diff --git a/src/crimson/os/seastore/seastore_types.cc b/src/crimson/os/seastore/seastore_types.cc index 5dc87d2d60b..2f480482660 100644 --- a/src/crimson/os/seastore/seastore_types.cc +++ b/src/crimson/os/seastore/seastore_types.cc @@ -92,6 +92,10 @@ std::ostream& operator<<(std::ostream& out, segment_seq_printer_t seq) } } +std::ostream &operator<<(std::ostream &out, const laddr_t &laddr) { + return out << 'L' << std::hex << laddr.value << std::dec; +} + std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr) { if (pladdr.is_laddr()) { diff --git a/src/crimson/os/seastore/seastore_types.h b/src/crimson/os/seastore/seastore_types.h index df53bab798f..8f3ec38537f 100644 --- a/src/crimson/os/seastore/seastore_types.h +++ b/src/crimson/os/seastore/seastore_types.h @@ -192,7 +192,7 @@ private: std::ostream &operator<<(std::ostream &out, const segment_id_t&); // ondisk type of segment_id_t -struct __attribute((packed)) segment_id_le_t { +struct __attribute__((packed)) segment_id_le_t { ceph_le32 segment = ceph_le32(segment_id_t().segment); segment_id_le_t(const segment_id_t id) : @@ -853,7 +853,7 @@ inline paddr_t paddr_t::block_relative_to(paddr_t rhs) const { return as_res_paddr().block_relative_to(rhs.as_res_paddr()); } -struct __attribute((packed)) paddr_le_t { +struct __attribute__((packed)) paddr_le_t { ceph_le64 internal_paddr = ceph_le64(P_ADDR_NULL.internal_paddr); @@ -1007,32 +1007,100 @@ constexpr journal_seq_t JOURNAL_SEQ_MAX{ constexpr journal_seq_t JOURNAL_SEQ_NULL = JOURNAL_SEQ_MAX; // logical addr, see LBAManager, TransactionManager -using 
laddr_t = uint64_t; -constexpr laddr_t L_ADDR_MIN = std::numeric_limits::min(); -constexpr laddr_t L_ADDR_MAX = std::numeric_limits::max(); +class laddr_t { +public: + // the type of underlying integer + using Unsigned = uint64_t; + static constexpr Unsigned RAW_VALUE_MAX = + std::numeric_limits::max(); + + constexpr laddr_t() : laddr_t(RAW_VALUE_MAX) {} + constexpr explicit laddr_t(Unsigned value) : value(value) {} + + bool is_aligned(Unsigned alignment) const { + assert(alignment != 0); + assert((alignment & (alignment - 1)) == 0); + return value == p2align(value, alignment); + } + + laddr_t get_aligned_laddr(Unsigned alignment) const { + assert(alignment != 0); + assert((alignment & (alignment - 1)) == 0); + return laddr_t(p2align(value, alignment)); + } + + laddr_t get_roundup_laddr(Unsigned alignment) const { + assert(alignment != 0); + assert((alignment & (alignment - 1)) == 0); + return laddr_t(p2roundup(value, alignment)); + } + + /// laddr_t works like primitive integer type, encode/decode it manually + void encode(::ceph::buffer::list::contiguous_appender& p) const { + p.append(reinterpret_cast(&value), sizeof(Unsigned)); + } + void bound_encode(size_t& p) const { + p += sizeof(Unsigned); + } + void decode(::ceph::buffer::ptr::const_iterator& p) { + assert(static_cast(p.get_end() - p.get_pos()) >= sizeof(Unsigned)); + memcpy((char *)&value, p.get_pos_add(sizeof(Unsigned)), sizeof(Unsigned)); + } + + friend std::ostream &operator<<(std::ostream &, const laddr_t &); + + friend auto operator<=>(const laddr_t&, const laddr_t&) = default; + + friend laddr_t operator+(const laddr_t &laddr, const Unsigned &i) { + return laddr_t{laddr.value + i}; + } + + friend laddr_t operator+(const Unsigned &i, const laddr_t &laddr) { + return laddr_t{laddr.value + i}; + } + + friend laddr_t operator-(const laddr_t &laddr, const Unsigned &i) { + return laddr_t{laddr.value - i}; + } + + friend Unsigned operator-(const laddr_t &l, const laddr_t &r) { + return l.value - r.value; + } + + friend struct laddr_le_t; + friend struct pladdr_le_t; + +private: + Unsigned value; +}; + +constexpr laddr_t L_ADDR_MAX = laddr_t(laddr_t::RAW_VALUE_MAX); +constexpr laddr_t L_ADDR_MIN = laddr_t(0); constexpr laddr_t L_ADDR_NULL = L_ADDR_MAX; -constexpr laddr_t L_ADDR_ROOT = L_ADDR_MAX - 1; -constexpr laddr_t L_ADDR_LBAT = L_ADDR_MAX - 2; +constexpr laddr_t L_ADDR_ROOT = laddr_t(laddr_t::RAW_VALUE_MAX - 1); +constexpr laddr_t L_ADDR_LBAT = laddr_t(laddr_t::RAW_VALUE_MAX - 2); -struct __attribute((packed)) laddr_le_t { - ceph_le64 laddr = ceph_le64(L_ADDR_NULL); +struct __attribute__((packed)) laddr_le_t { + ceph_le64 laddr; using orig_type = laddr_t; - laddr_le_t() = default; + laddr_le_t() : laddr_le_t(L_ADDR_NULL) {} laddr_le_t(const laddr_le_t &) = default; explicit laddr_le_t(const laddr_t &addr) - : laddr(ceph_le64(addr)) {} + : laddr(addr.value) {} operator laddr_t() const { return laddr_t(laddr); } laddr_le_t& operator=(laddr_t addr) { ceph_le64 val; - val = addr; + val = addr.value; laddr = val; return *this; } + + bool operator==(const laddr_le_t&) const = default; }; constexpr uint64_t PL_ADDR_NULL = std::numeric_limits::max(); @@ -1087,7 +1155,7 @@ enum class addr_type_t : uint8_t { MAX=2 // or NONE }; -struct __attribute((packed)) pladdr_le_t { +struct __attribute__((packed)) pladdr_le_t { ceph_le64 pladdr = ceph_le64(PL_ADDR_NULL); addr_type_t addr_type = addr_type_t::MAX; @@ -1097,7 +1165,7 @@ struct __attribute((packed)) pladdr_le_t { : pladdr( ceph_le64( addr.is_laddr() ? 
- std::get<0>(addr.pladdr) : + std::get<0>(addr.pladdr).value : std::get<1>(addr.pladdr).internal_paddr)), addr_type( addr.is_laddr() ? @@ -2658,6 +2726,7 @@ struct cache_stats_t { WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::seastore_meta_t) WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_id_t) +WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::laddr_t) WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::paddr_t) WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal_seq_t) WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::delta_info_t) @@ -2679,6 +2748,7 @@ template <> struct fmt::formatter : template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; diff --git a/src/crimson/os/seastore/transaction_manager.h b/src/crimson/os/seastore/transaction_manager.h index 1c090196dd4..b245cf6dcf3 100644 --- a/src/crimson/os/seastore/transaction_manager.h +++ b/src/crimson/os/seastore/transaction_manager.h @@ -348,7 +348,7 @@ public: LOG_PREFIX(TransactionManager::alloc_non_data_extent); SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}", t, T::TYPE, len, placement_hint, laddr_hint); - ceph_assert(is_aligned(laddr_hint, epm->get_block_size())); + ceph_assert(laddr_hint.is_aligned(epm->get_block_size())); auto ext = cache->alloc_new_non_data_extent( t, len, @@ -388,7 +388,7 @@ public: LOG_PREFIX(TransactionManager::alloc_data_extents); SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}", t, T::TYPE, len, placement_hint, laddr_hint); - ceph_assert(is_aligned(laddr_hint, epm->get_block_size())); + ceph_assert(laddr_hint.is_aligned(epm->get_block_size())); auto exts = cache->alloc_new_data_extents( t, len, @@ -582,7 +582,7 @@ public: extent_len_t len) { LOG_PREFIX(TransactionManager::reserve_region); SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}", t, len, hint); - ceph_assert(is_aligned(hint, epm->get_block_size())); + ceph_assert(hint.is_aligned(epm->get_block_size())); return lba_manager->reserve_region( t, hint, @@ -615,7 +615,7 @@ public: LOG_PREFIX(TransactionManager::clone_pin); SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}, clone_offset {}", t, mapping.get_length(), hint, intermediate_key); - ceph_assert(is_aligned(hint, epm->get_block_size())); + ceph_assert(hint.is_aligned(epm->get_block_size())); return lba_manager->clone_mapping( t, hint, diff --git a/src/crimson/tools/store_nbd/tm_driver.cc b/src/crimson/tools/store_nbd/tm_driver.cc index 078e33bf8c4..860ce517db8 100644 --- a/src/crimson/tools/store_nbd/tm_driver.cc +++ b/src/crimson/tools/store_nbd/tm_driver.cc @@ -27,20 +27,20 @@ seastar::future<> TMDriver::write( "write", [this, offset, &ptr](auto& t) { - return tm->remove(t, offset + return tm->remove(t, laddr_t(offset) ).discard_result().handle_error_interruptible( crimson::ct_error::enoent::handle([](auto) { return seastar::now(); }), crimson::ct_error::pass_further_all{} ).si_then([this, offset, &t, &ptr] { logger().debug("dec_ref complete"); - return tm->alloc_data_extents(t, offset, ptr.length()); + return tm->alloc_data_extents(t, laddr_t(offset), ptr.length()); }).si_then([this, offset, &t, &ptr](auto extents) mutable { boost::ignore_unused(offset); // avoid clang 
warning; auto off = offset; auto left = ptr.length(); size_t written = 0; for (auto &ext : extents) { - assert(ext->get_laddr() == (size_t)off); + assert(ext->get_laddr() == laddr_t(off)); assert(ext->get_bptr().length() <= left); ptr.copy_out(written, ext->get_length(), ext->get_bptr().c_str()); off += ext->get_length(); @@ -111,9 +111,9 @@ seastar::future TMDriver::read( "read", [=, &blret, this](auto& t) { - return read_extents(t, offset, size + return read_extents(t, laddr_t(offset), size ).si_then([=, &blret](auto ext_list) { - size_t cur = offset; + laddr_t cur(offset); for (auto &i: ext_list) { if (cur != i.first) { assert(cur < i.first); diff --git a/src/test/crimson/seastore/onode_tree/test_fltree_onode_manager.cc b/src/test/crimson/seastore/onode_tree/test_fltree_onode_manager.cc index f1a0cf65e82..2365f3a0da4 100644 --- a/src/test/crimson/seastore/onode_tree/test_fltree_onode_manager.cc +++ b/src/test/crimson/seastore/onode_tree/test_fltree_onode_manager.cc @@ -32,7 +32,7 @@ struct onode_item_t { void initialize(Transaction& t, Onode& value) const { auto &ftvalue = static_cast(value); ftvalue.update_onode_size(t, size); - auto oroot = omap_root_t(id, cnt_modify, + auto oroot = omap_root_t(laddr_t(id), cnt_modify, value.get_metadata_hint(block_size)); ftvalue.update_omap_root(t, oroot); validate(value); @@ -41,7 +41,7 @@ struct onode_item_t { void validate(Onode& value) const { auto& layout = value.get_layout(); ceph_assert(laddr_t(layout.size) == laddr_t{size}); - ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).addr == id); + ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).addr == laddr_t(id)); ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).depth == cnt_modify); } diff --git a/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc b/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc index 48b65909143..349fcfa14e2 100644 --- a/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc +++ b/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc @@ -127,7 +127,7 @@ TEST_F(a_basic_test_t, 1_basic_sizes) value.payload_size = 8; #define _STAGE_T(NodeType) node_to_stage_t #define NXT_T(StageType) staged - laddr_t i_value{0}; + laddr_t i_value = L_ADDR_MIN; logger().info("\n" "Bytes of a key-value insertion (full-string):\n" " s-p-c, 'n'-'o', s-g => value_payload(8): typically internal 43B, leaf 59B\n" @@ -1047,8 +1047,8 @@ class DummyChildPool { static Ref create_new( const std::set& keys, bool is_level_tail, DummyChildPool& pool) { - static laddr_t seed = 0; - return create(keys, is_level_tail, seed++, pool); + static uint64_t seed = 0; + return create(keys, is_level_tail, laddr_t(seed++), pool); } static eagain_ifuture> create_initial( diff --git a/src/test/crimson/seastore/test_btree_lba_manager.cc b/src/test/crimson/seastore/test_btree_lba_manager.cc index fd8e7684e34..67204023a75 100644 --- a/src/test/crimson/seastore/test_btree_lba_manager.cc +++ b/src/test/crimson/seastore/test_btree_lba_manager.cc @@ -378,14 +378,14 @@ TEST_F(lba_btree_test, basic) run_async([this] { constexpr unsigned total = 16<<10; for (unsigned i = 0; i < total; i += 16) { - insert(i, 8); + insert(laddr_t(i), 8); } for (unsigned i = 0; i < total; i += 16) { - check_lower_bound(i); - check_lower_bound(i + 4); - check_lower_bound(i + 8); - check_lower_bound(i + 12); + check_lower_bound(laddr_t(i)); + check_lower_bound(laddr_t(i + 4)); + check_lower_bound(laddr_t(i + 8)); + check_lower_bound(laddr_t(i + 12)); } }); } @@ -665,7 
+665,7 @@ struct btree_lba_manager_test : btree_test_base { [=, &t, this](auto &) { return lba_manager->scan_mappings( *t.t, - 0, + L_ADDR_MIN, L_ADDR_MAX, [iter=t.mappings.begin(), &t](auto l, auto p, auto len) mutable { EXPECT_NE(iter, t.mappings.end()); @@ -681,7 +681,7 @@ struct btree_lba_manager_test : btree_test_base { TEST_F(btree_lba_manager_test, basic) { run_async([this] { - laddr_t laddr = 0x12345678 * block_size; + laddr_t laddr = laddr_t(0x12345678 * block_size); { // write initial mapping auto t = create_transaction(); @@ -701,7 +701,7 @@ TEST_F(btree_lba_manager_test, force_split) auto t = create_transaction(); logger().debug("opened transaction"); for (unsigned j = 0; j < 5; ++j) { - alloc_mappings(t, 0, block_size); + alloc_mappings(t, L_ADDR_MIN, block_size); if ((i % 10 == 0) && (j == 3)) { check_mappings(t); check_mappings(); @@ -721,7 +721,7 @@ TEST_F(btree_lba_manager_test, force_split_merge) auto t = create_transaction(); logger().debug("opened transaction"); for (unsigned j = 0; j < 5; ++j) { - auto rets = alloc_mappings(t, 0, block_size); + auto rets = alloc_mappings(t, L_ADDR_MIN, block_size); // just to speed things up a bit if ((i % 100 == 0) && (j == 3)) { check_mappings(t); @@ -780,7 +780,7 @@ TEST_F(btree_lba_manager_test, single_transaction_split_merge) { auto t = create_transaction(); for (unsigned i = 0; i < 400; ++i) { - alloc_mappings(t, 0, block_size); + alloc_mappings(t, L_ADDR_MIN, block_size); } check_mappings(t); submit_test_transaction(std::move(t)); @@ -803,7 +803,7 @@ TEST_F(btree_lba_manager_test, single_transaction_split_merge) { auto t = create_transaction(); for (unsigned i = 0; i < 600; ++i) { - alloc_mappings(t, 0, block_size); + alloc_mappings(t, L_ADDR_MIN, block_size); } auto addresses = get_mapped_addresses(t); for (unsigned i = 0; i != addresses.size(); ++i) { @@ -831,23 +831,23 @@ TEST_F(btree_lba_manager_test, split_merge_multi) } }; iterate([&](auto &t, auto idx) { - alloc_mappings(t, idx * block_size, block_size); + alloc_mappings(t, laddr_t(idx * block_size), block_size); }); check_mappings(); iterate([&](auto &t, auto idx) { if ((idx % 32) > 0) { - decref_mapping(t, idx * block_size); + decref_mapping(t, laddr_t(idx * block_size)); } }); check_mappings(); iterate([&](auto &t, auto idx) { if ((idx % 32) > 0) { - alloc_mappings(t, idx * block_size, block_size); + alloc_mappings(t, laddr_t(idx * block_size), block_size); } }); check_mappings(); iterate([&](auto &t, auto idx) { - decref_mapping(t, idx * block_size); + decref_mapping(t, laddr_t(idx * block_size)); }); check_mappings(); }); diff --git a/src/test/crimson/seastore/test_object_data_handler.cc b/src/test/crimson/seastore/test_object_data_handler.cc index 89559f1cbc8..fe47d46a912 100644 --- a/src/test/crimson/seastore/test_object_data_handler.cc +++ b/src/test/crimson/seastore/test_object_data_handler.cc @@ -218,14 +218,14 @@ struct object_data_handler_test_t: objaddr_t offset, extent_len_t length) { auto ret = with_trans_intr(t, [&](auto &t) { - return tm->get_pins(t, offset, length); + return tm->get_pins(t, laddr_t(offset), length); }).unsafe_get(); return ret; } std::list get_mappings(objaddr_t offset, extent_len_t length) { auto t = create_mutate_transaction(); auto ret = with_trans_intr(*t, [&](auto &t) { - return tm->get_pins(t, offset, length); + return tm->get_pins(t, laddr_t(offset), length); }).unsafe_get(); return ret; } @@ -297,7 +297,7 @@ struct object_data_handler_test_t: "seastore_max_data_allocation_size", "8192").get(); } - laddr_t get_random_laddr(size_t 
block_size, laddr_t limit) { + objaddr_t get_random_write_offset(size_t block_size, objaddr_t limit) { return block_size * std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen); } @@ -769,7 +769,7 @@ TEST_P(object_data_handler_test_t, random_overwrite) { for (unsigned j = 0; j < 100; ++j) { auto t = create_mutate_transaction(); for (unsigned k = 0; k < 2; ++k) { - write(*t, get_random_laddr(BSIZE, TOTAL), wsize, + write(*t, get_random_write_offset(BSIZE, TOTAL), wsize, (char)((j*k) % std::numeric_limits::max())); } submit_transaction(std::move(t)); @@ -798,7 +798,7 @@ TEST_P(object_data_handler_test_t, overwrite_then_read_within_transaction) { auto pins = get_mappings(*t, base, len); assert(pins.size() == 1); auto pin1 = remap_pin(*t, std::move(pins.front()), 4096, 8192); - auto ext = get_extent(*t, base + 4096, 4096 * 2); + auto ext = get_extent(*t, laddr_t(base + 4096), 4096 * 2); ASSERT_TRUE(ext->is_exist_clean()); write(*t, base + 4096, 4096, 'y'); ASSERT_TRUE(ext->is_exist_mutation_pending()); diff --git a/src/test/crimson/seastore/test_transaction_manager.cc b/src/test/crimson/seastore/test_transaction_manager.cc index e4b750577a1..f2c8c3ccab3 100644 --- a/src/test/crimson/seastore/test_transaction_manager.cc +++ b/src/test/crimson/seastore/test_transaction_manager.cc @@ -66,9 +66,9 @@ struct transaction_manager_test_t : : TMTestState(num_main_devices, num_cold_devices), gen(rd()) { } - laddr_t get_random_laddr(size_t block_size, laddr_t limit) { - return block_size * - std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen); + laddr_t get_random_laddr(size_t block_size, size_t limit) { + return laddr_t(block_size * + std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen)); } char get_random_contents() { @@ -719,7 +719,7 @@ struct transaction_manager_test_t : [this, &overlay](auto &t) { return lba_manager->scan_mappings( t, - 0, + L_ADDR_MIN, L_ADDR_MAX, [iter=overlay.begin(), &overlay](auto l, auto p, auto len) mutable { EXPECT_NE(iter, overlay.end()); @@ -830,9 +830,9 @@ struct transaction_manager_test_t : auto t = create_transaction(); auto extent = alloc_extent( t, - i * BSIZE, + laddr_t(i * BSIZE), BSIZE); - ASSERT_EQ(i * BSIZE, extent->get_laddr()); + ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr()); submit_transaction(std::move(t)); } @@ -844,7 +844,7 @@ struct transaction_manager_test_t : boost::make_counting_iterator(0lu), boost::make_counting_iterator(BLOCKS), [this, &t](auto i) { - return tm->read_extent(t, i * BSIZE, BSIZE + return tm->read_extent(t, laddr_t(i * BSIZE), BSIZE ).si_then([](auto) { return seastar::now(); }); @@ -870,9 +870,9 @@ struct transaction_manager_test_t : auto t = create_transaction(); auto extent = alloc_extent( t, - i * BSIZE, + laddr_t(i * BSIZE), BSIZE); - ASSERT_EQ(i * BSIZE, extent->get_laddr()); + ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr()); if (try_submit_transaction(std::move(t))) break; } @@ -1346,9 +1346,9 @@ struct transaction_manager_test_t : void test_remap_pin() { run_async([this] { disable_max_extent_size(); - constexpr size_t l_offset = 32 << 10; + constexpr laddr_t l_offset = laddr_t(32 << 10); constexpr size_t l_len = 32 << 10; - constexpr size_t r_offset = 64 << 10; + constexpr laddr_t r_offset = laddr_t(64 << 10); constexpr size_t r_len = 32 << 10; { auto t = create_transaction(); @@ -1400,12 +1400,12 @@ struct transaction_manager_test_t : void test_clone_and_remap_pin() { run_async([this] { disable_max_extent_size(); - constexpr size_t l_offset = 32 << 10; + constexpr laddr_t l_offset = 
laddr_t(32 << 10); constexpr size_t l_len = 32 << 10; - constexpr size_t r_offset = 64 << 10; + constexpr laddr_t r_offset = laddr_t(64 << 10); constexpr size_t r_len = 32 << 10; - constexpr size_t l_clone_offset = 96 << 10; - constexpr size_t r_clone_offset = 128 << 10; + constexpr laddr_t l_clone_offset = laddr_t(96 << 10); + constexpr laddr_t r_clone_offset = laddr_t(128 << 10); { auto t = create_transaction(); auto lext = alloc_extent(t, l_offset, l_len); @@ -1455,11 +1455,11 @@ struct transaction_manager_test_t : void test_overwrite_pin() { run_async([this] { disable_max_extent_size(); - constexpr size_t m_offset = 8 << 10; + constexpr laddr_t m_offset = laddr_t(8 << 10); constexpr size_t m_len = 56 << 10; - constexpr size_t l_offset = 64 << 10; + constexpr laddr_t l_offset = laddr_t(64 << 10); constexpr size_t l_len = 64 << 10; - constexpr size_t r_offset = 128 << 10; + constexpr laddr_t r_offset = laddr_t(128 << 10); constexpr size_t r_len = 64 << 10; { auto t = create_transaction(); @@ -1538,7 +1538,7 @@ struct transaction_manager_test_t : run_async([this] { disable_max_extent_size(); constexpr unsigned REMAP_NUM = 32; - constexpr size_t offset = 0; + constexpr laddr_t offset = L_ADDR_MIN; constexpr size_t length = 256 << 10; { auto t = create_transaction(); @@ -1575,7 +1575,7 @@ struct transaction_manager_test_t : if (off == 0 || off >= 255) { continue; } - auto new_off = (off << 10) - last_pin->get_key(); + auto new_off = laddr_t(off << 10) - last_pin->get_key(); auto new_len = last_pin->get_length() - new_off; //always remap right extent at new split_point auto pin = remap_pin(t, std::move(last_pin), new_off, new_len); @@ -1620,7 +1620,7 @@ struct transaction_manager_test_t : run_async([this] { disable_max_extent_size(); constexpr unsigned REMAP_NUM = 32; - constexpr size_t offset = 0; + constexpr laddr_t offset = L_ADDR_MIN; constexpr size_t length = 256 << 10; { auto t = create_transaction(); @@ -1665,7 +1665,7 @@ struct transaction_manager_test_t : auto end_off = split_points.front(); split_points.pop_front(); ASSERT_TRUE(start_off <= end_off); - if (((end_off << 10) == pin0->get_key() + pin0->get_length()) + if ((laddr_t(end_off << 10) == pin0->get_key() + pin0->get_length()) || (start_off == end_off)) { if (split_points.empty() && empty_transaction) { early_exit++; @@ -1674,7 +1674,7 @@ struct transaction_manager_test_t : continue; } empty_transaction = false; - auto new_off = (start_off << 10) - last_rpin->get_key(); + auto new_off = laddr_t(start_off << 10) - last_rpin->get_key(); auto new_len = (end_off - start_off) << 10; bufferlist bl; bl.append(ceph::bufferptr(ceph::buffer::create(new_len, 0))); @@ -1766,7 +1766,7 @@ struct tm_random_block_device_test_t : TEST_P(tm_random_block_device_test_t, scatter_allocation) { run_async([this] { - constexpr laddr_t ADDR = 0xFF * 4096; + constexpr laddr_t ADDR = laddr_t(0xFF * 4096); epm->prefill_fragmented_devices(); auto t = create_transaction(); for (int i = 0; i < 1991; i++) { @@ -1782,9 +1782,9 @@ TEST_P(tm_random_block_device_test_t, scatter_allocation) TEST_P(tm_single_device_test_t, basic) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { - constexpr laddr_t ADDR = 0xFF * SIZE; + constexpr laddr_t ADDR = laddr_t(0xFF * SIZE); { auto t = create_transaction(); auto extent = alloc_extent( @@ -1803,9 +1803,9 @@ TEST_P(tm_single_device_test_t, basic) TEST_P(tm_single_device_test_t, mutate) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { - constexpr laddr_t 
ADDR = 0xFF * SIZE; + constexpr laddr_t ADDR = laddr_t(0xFF * SIZE); { auto t = create_transaction(); auto extent = alloc_extent( @@ -1841,10 +1841,10 @@ TEST_P(tm_single_device_test_t, mutate) TEST_P(tm_single_device_test_t, allocate_lba_conflict) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { - constexpr laddr_t ADDR = 0xFF * SIZE; - constexpr laddr_t ADDR2 = 0xFE * SIZE; + constexpr laddr_t ADDR = laddr_t(0xFF * SIZE); + constexpr laddr_t ADDR2 = laddr_t(0xFE * SIZE); auto t = create_transaction(); auto t2 = create_transaction(); @@ -1874,7 +1874,7 @@ TEST_P(tm_single_device_test_t, allocate_lba_conflict) TEST_P(tm_single_device_test_t, mutate_lba_conflict) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { { auto t = create_transaction(); @@ -1889,7 +1889,7 @@ TEST_P(tm_single_device_test_t, mutate_lba_conflict) check(); } - constexpr laddr_t ADDR = 150 * SIZE; + constexpr laddr_t ADDR = laddr_t(150 * SIZE); { auto t = create_transaction(); auto t2 = create_transaction(); @@ -1913,11 +1913,11 @@ TEST_P(tm_single_device_test_t, mutate_lba_conflict) TEST_P(tm_single_device_test_t, concurrent_mutate_lba_no_conflict) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; constexpr size_t NUM = 500; - constexpr laddr_t addr = 0; - constexpr laddr_t addr2 = SIZE * (NUM - 1); - run_async([this] { + constexpr laddr_t addr = L_ADDR_MIN; + constexpr laddr_t addr2 = laddr_t(SIZE * (NUM - 1)); + run_async([this, addr, addr2] { { auto t = create_transaction(); for (unsigned i = 0; i < NUM; ++i) { @@ -1945,9 +1945,9 @@ TEST_P(tm_single_device_test_t, concurrent_mutate_lba_no_conflict) TEST_P(tm_single_device_test_t, create_remove_same_transaction) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { - constexpr laddr_t ADDR = 0xFF * SIZE; + constexpr laddr_t ADDR = laddr_t(0xFF * SIZE); { auto t = create_transaction(); auto extent = alloc_extent( @@ -1976,7 +1976,7 @@ TEST_P(tm_single_device_test_t, create_remove_same_transaction) TEST_P(tm_single_device_test_t, split_merge_read_same_transaction) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { { auto t = create_transaction(); @@ -2006,9 +2006,9 @@ TEST_P(tm_single_device_test_t, split_merge_read_same_transaction) TEST_P(tm_single_device_test_t, inc_dec_ref) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { - constexpr laddr_t ADDR = 0xFF * SIZE; + constexpr laddr_t ADDR = laddr_t(0xFF * SIZE); { auto t = create_transaction(); auto extent = alloc_extent( @@ -2053,16 +2053,16 @@ TEST_P(tm_single_device_test_t, inc_dec_ref) TEST_P(tm_single_device_test_t, cause_lba_split) { - constexpr laddr_t SIZE = 4096; + constexpr size_t SIZE = 4096; run_async([this] { for (unsigned i = 0; i < 200; ++i) { auto t = create_transaction(); auto extent = alloc_extent( t, - i * SIZE, + laddr_t(i * SIZE), SIZE, (char)(i & 0xFF)); - ASSERT_EQ(i * SIZE, extent->get_laddr()); + ASSERT_EQ(laddr_t(i * SIZE), extent->get_laddr()); submit_transaction(std::move(t)); } check(); @@ -2080,9 +2080,9 @@ TEST_P(tm_single_device_test_t, random_writes) auto t = create_transaction(); auto extent = alloc_extent( t, - i * BSIZE, + laddr_t(i * BSIZE), BSIZE); - ASSERT_EQ(i * BSIZE, extent->get_laddr()); + ASSERT_EQ(laddr_t(i * BSIZE), extent->get_laddr()); submit_transaction(std::move(t)); } @@ -2098,7 +2098,7 @@ TEST_P(tm_single_device_test_t, random_writes) // pad out transaction auto paddings = 
alloc_extents( t, - TOTAL + (k * PADDING_SIZE), + laddr_t(TOTAL + (k * PADDING_SIZE)), PADDING_SIZE); for (auto &padding : paddings) { dec_ref(t, padding->get_laddr()); @@ -2131,7 +2131,7 @@ TEST_P(tm_single_device_test_t, find_hole_assert_trigger) TEST_P(tm_single_device_intergrity_check_test_t, remap_lazy_read) { - constexpr laddr_t offset = 0; + constexpr laddr_t offset = L_ADDR_MIN; constexpr size_t length = 256 << 10; run_async([this, offset] { disable_max_extent_size(); @@ -2184,7 +2184,7 @@ TEST_P(tm_single_device_test_t, invalid_lba_mapping_detect) for (int i = 0; i < LEAF_NODE_CAPACITY; i++) { auto extent = alloc_extent( t, - i * 4096, + laddr_t(i * 4096), 4096, 'a'); } @@ -2193,12 +2193,12 @@ TEST_P(tm_single_device_test_t, invalid_lba_mapping_detect) { auto t = create_transaction(); - auto pin = get_pin(t, (LEAF_NODE_CAPACITY - 1) * 4096); + auto pin = get_pin(t, laddr_t((LEAF_NODE_CAPACITY - 1) * 4096)); assert(pin->is_parent_viewable()); - auto extent = alloc_extent(t, LEAF_NODE_CAPACITY * 4096, 4096, 'a'); + auto extent = alloc_extent(t, laddr_t(LEAF_NODE_CAPACITY * 4096), 4096, 'a'); assert(!pin->is_parent_viewable()); - pin = get_pin(t, LEAF_NODE_CAPACITY * 4096); - std::ignore = alloc_extent(t, (LEAF_NODE_CAPACITY + 1) * 4096, 4096, 'a'); + pin = get_pin(t, laddr_t(LEAF_NODE_CAPACITY * 4096)); + std::ignore = alloc_extent(t, laddr_t((LEAF_NODE_CAPACITY + 1) * 4096), 4096, 'a'); assert(pin->is_parent_viewable()); assert(pin->parent_modified()); pin->maybe_fix_pos();
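
The core of this change is the new laddr_t class in seastore_types.h: raw integers no longer convert implicitly, alignment is queried through member helpers instead of calling p2align/p2roundup on a plain typedef, and laddr/length arithmetic is limited to the overloads defined above. Below is a minimal, self-contained sketch of those semantics; p2align, p2roundup, block_size and main() are local stand-ins for illustration and are not part of the patch.

#include <cassert>
#include <compare>
#include <cstdint>
#include <iostream>
#include <limits>

namespace sketch {

constexpr uint64_t p2align(uint64_t v, uint64_t a)   { return v & ~(a - 1); }
constexpr uint64_t p2roundup(uint64_t v, uint64_t a) { return p2align(v + a - 1, a); }

// Mirrors the struct added to seastore_types.h: an explicitly constructed
// wrapper around uint64_t with alignment helpers and offset arithmetic.
class laddr_t {
public:
  using Unsigned = uint64_t;
  constexpr laddr_t() : value(std::numeric_limits<Unsigned>::max()) {}
  constexpr explicit laddr_t(Unsigned v) : value(v) {}

  bool is_aligned(Unsigned alignment) const {
    return value == p2align(value, alignment);
  }
  laddr_t get_aligned_laddr(Unsigned alignment) const {
    return laddr_t(p2align(value, alignment));
  }
  laddr_t get_roundup_laddr(Unsigned alignment) const {
    return laddr_t(p2roundup(value, alignment));
  }

  friend auto operator<=>(const laddr_t&, const laddr_t&) = default;
  friend laddr_t operator+(const laddr_t &l, Unsigned i) { return laddr_t(l.value + i); }
  friend laddr_t operator-(const laddr_t &l, Unsigned i) { return laddr_t(l.value - i); }
  friend Unsigned operator-(const laddr_t &l, const laddr_t &r) { return l.value - r.value; }

  friend std::ostream &operator<<(std::ostream &out, const laddr_t &l) {
    return out << 'L' << std::hex << l.value << std::dec;
  }

private:
  Unsigned value;
};

} // namespace sketch

int main() {
  using sketch::laddr_t;
  constexpr uint64_t block_size = 4096;

  // Raw integers no longer convert implicitly; construction must be explicit.
  laddr_t laddr(0x12345678 * block_size);
  assert(laddr.is_aligned(block_size));

  // laddr + length yields laddr_t; laddr - laddr yields a plain length.
  laddr_t end = laddr + 3 * block_size + 100;
  assert(!end.is_aligned(block_size));
  assert(end.get_aligned_laddr(block_size) == laddr + 3 * block_size);
  assert(end.get_roundup_laddr(block_size) == laddr + 4 * block_size);
  assert(end - laddr == 3 * block_size + 100);

  std::cout << laddr << " .. " << end << std::endl;
  return 0;
}

This mirrors the caller-side changes in the hunks above, e.g. ceph_assert(laddr_hint.is_aligned(epm->get_block_size())) replacing the old free-function is_aligned() check in transaction_manager.h.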
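
object_data_handler.cc switches from the generic interval_set to the new laddr_interval_set_t alias, which, per its uses above, stores laddr_t offsets with extent_len_t lengths. A usage sketch follows; it assumes the Ceph source tree for the include, and the offsets and lengths are made-up illustrative values.

#include <cassert>

#include "crimson/os/seastore/laddr_interval_set.h"

using namespace crimson::os::seastore;

void laddr_interval_set_example() {
  laddr_interval_set_t set;

  // Offsets are laddr_t and lengths remain extent_len_t, so the interval
  // set keeps working now that laddr_t no longer converts implicitly.
  set.insert(laddr_t(0x1000), extent_len_t(0x2000));
  set.insert(laddr_t(0x8000), extent_len_t(0x1000));

  assert(set.contains(laddr_t(0x1800), extent_len_t(0x800)));
  assert(!set.contains(laddr_t(0x3000), extent_len_t(0x1000)));
  assert(set.size() == 0x3000);   // sum of interval lengths
  assert(set.num_intervals() == 2);
}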