LOG_PREFIX(Cache::add_extent);
assert(ref->is_valid());
extents.insert(*ref);
-
if (ref->is_dirty()) {
add_to_dirty(ref);
} else {
{
LOG_PREFIX(Cache::commit_retire_extent);
DEBUGT("extent {}", t, *ref);
- assert(ref->is_valid());
+ remove_extent(ref);
- // TODO: why does this duplicate remove_extent?
- if (ref->is_dirty()) {
- remove_from_dirty(ref);
- } else {
- lru.remove_from_lru(*ref);
- }
ref->dirty_from_or_retired_at = JOURNAL_SEQ_MAX;
-
invalidate_extent(t, *ref);
- extents.erase(*ref);
}
void Cache::commit_replace_extent(
{
switch (type) {
case extent_types_t::ROOT:
- assert(0 == "ROOT is never directly alloc'd");
+ ceph_assert(0 == "ROOT is never directly alloc'd");
return CachedExtentRef();
case extent_types_t::LADDR_INTERNAL:
return alloc_new_extent<lba_manager::btree::LBAInternalNode>(t, length, delay);
}
DEBUGT("read_set validated", t);
t.read_set.clear();
+ t.write_set.clear();
record_t record;
- t.write_set.clear();
-
// Add new copy of mutated blocks, set_io_wait to block until written
record.deltas.reserve(t.mutated_block_list.size());
for (auto &i: t.mutated_block_list) {
assert(i->get_version() > 0);
auto final_crc = i->get_crc32c();
if (i->get_type() == extent_types_t::ROOT) {
+ assert(t.root == i);
root = t.root;
DEBUGT("writing out root delta for {}", t, *t.root);
record.push_back(
i->prepare_write();
bl.append(i->get_bptr());
if (i->get_type() == extent_types_t::ROOT) {
- assert(0 == "ROOT never gets written as a fresh block");
+ ceph_assert(0 == "ROOT never gets written as a fresh block");
}
assert(bl.length() == i->get_length());
intrusive_ptr_release(ptr);
}
assert(stats.dirty_bytes == 0);
- clear_lru();
+ lru.clear();
return close_ertr::now();
}
for (auto i = dirty.begin();
i != dirty.end() && bytes_so_far < max_bytes;
++i) {
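// Note: candidates are expected in nondecreasing get_dirty_from() order;
// the check below logs and aborts if the dirty list violates that ordering.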
- if (i->get_dirty_from() != journal_seq_t() && i->get_dirty_from() < seq) {
+ auto dirty_from = i->get_dirty_from();
+ ceph_assert(dirty_from != journal_seq_t() &&
+ dirty_from != JOURNAL_SEQ_MAX &&
+ dirty_from != NO_DELTAS);
+ if (dirty_from < seq) {
DEBUGT("next {}", t, *i);
- if (!(cand.empty() ||
- cand.back()->get_dirty_from() <= i->get_dirty_from())) {
+ if (!cand.empty() && cand.back()->get_dirty_from() > dirty_from) {
ERRORT("last {}, next {}", t, *cand.back(), *i);
+ ceph_abort();
}
- assert(cand.empty() || cand.back()->get_dirty_from() <= i->get_dirty_from());
bytes_so_far += i->get_length();
cand.push_back(&*i);
-
} else {
break;
}
if (ext->get_type() == extent_types_t::ROOT) {
if (t.root) {
assert(&*t.root == &*ext);
- assert(0 == "t.root would have to already be in the read set");
+ ceph_assert(0 == "t.root would have to already be in the read set");
} else {
assert(&*ext == &*root);
t.root = root;
switch (type) {
case extent_types_t::ROOT:
- assert(0 == "ROOT is never directly read");
+ ceph_assert(0 == "ROOT is never directly read");
return get_extent_ertr::make_ready_future<CachedExtentRef>();
case extent_types_t::LADDR_INTERNAL:
return get_extent<lba_manager::btree::LBAInternalNode>(
return get_extent<T>(t, offset, length, [](T &){});
}
-
- /**
- * get_extent_by_type
- *
- * Based on type, instantiate the correct concrete type
- * and read in the extent at location offset~length.
- */
private:
// This is a workaround for std::move_only_function not being available,
// not really worth generalizing at this time.
}
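// A minimal sketch of what such a workaround typically amounts to: a
// type-erased wrapper owning a move-only callable behind a virtual
// interface (illustrative only; the actual class body is elided above):
//
//   struct callable_i {
//     virtual void operator()(CachedExtent &) = 0;
//     virtual ~callable_i() = default;
//   };
//   template <typename Func>
//   struct callable_wrapper final : callable_i {
//     Func func;
//     callable_wrapper(Func &&f) : func(std::forward<Func>(f)) {}
//     void operator()(CachedExtent &extent) final { func(extent); }
//   };
//   // held via std::unique_ptr<callable_i>, movable but not copyable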
public:
+ /**
+ * get_extent_by_type
+ *
+ * Based on type, instantiate the correct concrete type
+ * and read in the extent at location offset~length.
+ */
template <typename Func>
get_extent_by_type_ret get_extent_by_type(
Transaction &t, ///< [in] transaction
return ret;
}
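// A minimal usage sketch, assuming the elided parameters carry the runtime
// extent type plus the offset~length named in the comment above (parameter
// names and order here are illustrative, not taken from this header):
//
//   cache.get_extent_by_type(
//     t, extent_types_t::LADDR_INTERNAL, offset, length,
//     [](CachedExtent &) {}
//   );  // resolves to a CachedExtentRef of the matching concrete subclass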
- void clear_lru() {
- lru.clear();
- }
-
void mark_delayed_extent_inline(
Transaction& t,
LogicalCachedExtentRef& ref) {
// journal replay should have been finished at this point,
// Cache::root should have been inserted into the dirty list
assert(root->is_dirty());
- std::vector<CachedExtentRef> dirty;
+ std::vector<CachedExtentRef> _dirty;
for (auto &e : extents) {
- dirty.push_back(CachedExtentRef(&e));
+ _dirty.push_back(CachedExtentRef(&e));
}
return seastar::do_with(
std::forward<F>(f),
- std::move(dirty),
+ std::move(_dirty),
[this, &t](auto &f, auto &refs) mutable
{
return trans_intr::do_for_each(