t,
limit,
config.rewrite_dirty_bytes_per_cycle
- ).si_then([=, &t](auto dirty_list) {
+ ).si_then([=, &t, this](auto dirty_list) {
LOG_PREFIX(AsyncCleaner::rewrite_dirty);
DEBUGT("rewrite {} dirty extents", t, dirty_list.size());
return seastar::do_with(
});
});
}),
- [=](auto &cursor, auto &dhandler) {
+ [=, this](auto &cursor, auto &dhandler) {
return sm_group.scan_valid_records(
cursor,
header.segment_nonce,
auto retref = std::make_unique<size_t>(0);
auto &budget_used = *retref;
return crimson::repeat(
- [=, &cursor, &budget_used, &handler]() mutable
+ [=, &cursor, &budget_used, &handler, this]() mutable
-> scan_valid_records_ertr::future<seastar::stop_iteration> {
- return [=, &handler, &cursor, &budget_used] {
+ return [=, &handler, &cursor, &budget_used, this] {
if (!cursor.last_valid_header_found) {
return read_validate_record_metadata(cursor.seq.offset, nonce
).safe_then([=, &cursor](auto md) {
cursor.emplace_record_group(header, std::move(md_bl));
return scan_valid_records_ertr::now();
}
- }).safe_then([=, &cursor, &budget_used, &handler] {
+ }).safe_then([=, &cursor, &budget_used, &handler, this] {
DEBUG("processing committed record groups until {}, {} pending",
cursor.last_committed,
cursor.pending_record_groups.size());
return crimson::repeat(
- [=, &budget_used, &cursor, &handler] {
+ [=, &budget_used, &cursor, &handler, this] {
if (cursor.pending_record_groups.empty()) {
/* This is only possible if the segment is empty.
           * A record's last_committed must be prior to its own
auto& [peer, messages] = osd_messages;
logger().debug("dispatch_context_messages sending messages to {}", peer);
return seastar::parallel_for_each(
- std::move(messages), [=, peer=peer](auto& m) {
+ std::move(messages), [=, peer=peer, this](auto& m) {
return send_to_osd(peer, std::move(m), local_state.osdmap->get_epoch());
});
});
assert(size % (size_t)device->get_block_size() == 0);
auto blptrret = std::make_unique<bufferlist>();
auto &blret = *blptrret;
- return repeat_eagain([=, &blret] {
+ return repeat_eagain([=, &blret, this] {
return tm->with_transaction_intr(
Transaction::src_t::READ,
"read",
- [=, &blret](auto& t)
+ [=, &blret, this](auto& t)
{
return read_extents(t, offset, size
).si_then([=, &blret](auto ext_list) {
void insert(laddr_t addr, extent_len_t len) {
ceph_assert(check.count(addr) == 0);
check.emplace(addr, get_map_val(len));
- lba_btree_update([=](auto &btree, auto &t) {
+ lba_btree_update([=, this](auto &btree, auto &t) {
return btree.insert(
get_op_context(t), addr, get_map_val(len)
).si_then([](auto){});
ceph_assert(iter != check.end());
auto len = iter->second.len;
check.erase(iter++);
- lba_btree_update([=](auto &btree, auto &t) {
+ lba_btree_update([=, this](auto &btree, auto &t) {
return btree.lower_bound(
get_op_context(t), addr
).si_then([this, len, addr, &btree, &t](auto iter) {
void check_lower_bound(laddr_t addr) {
auto iter = check.lower_bound(addr);
- auto result = lba_btree_read([=](auto &btree, auto &t) {
+ auto result = lba_btree_read([=, this](auto &btree, auto &t) {
return btree.lower_bound(
get_op_context(t), addr
).si_then([](auto iter)
paddr_t paddr) {
auto ret = with_trans_intr(
*t.t,
- [=](auto &t) {
+ [=, this](auto &t) {
return lba_manager->alloc_extent(t, hint, len, paddr);
}).unsafe_get0();
logger().debug("alloc'd: {}", *ret);
auto refcnt = with_trans_intr(
*t.t,
- [=](auto &t) {
+ [=, this](auto &t) {
return lba_manager->decref_extent(
t,
target->first);
target->second.refcount++;
auto refcnt = with_trans_intr(
*t.t,
- [=](auto &t) {
+ [=, this](auto &t) {
return lba_manager->incref_extent(
t,
target->first);
auto ret_list = with_trans_intr(
*t.t,
- [=](auto &t) {
+ [=, this](auto &t) {
return lba_manager->get_mappings(
t, laddr, len);
}).unsafe_get0();
auto ret_pin = with_trans_intr(
*t.t,
- [=](auto &t) {
+ [=, this](auto &t) {
return lba_manager->get_mapping(
t, laddr);
}).unsafe_get0();
}
with_trans_intr(
*t.t,
- [=, &t](auto &) {
+ [=, &t, this](auto &) {
return lba_manager->scan_mappings(
*t.t,
0,
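
All of the hunks above make the same one-token change: C++20 deprecates capturing `this` implicitly through a `[=]` capture-default (P0806R2), so every lambda whose body touches a member (`sm_group`, `lba_manager`, `send_to_osd`, ...) now names `this` explicitly as `[=, this]`. A minimal sketch of the rule follows, with a hypothetical `Scanner` type standing in for the crimson classes:

    #include <cstddef>

    struct Scanner {
      std::size_t budget = 0;

      auto make_step() {
        // Pre-C++20, [=] silently captured `this` (by pointer) whenever a
        // member was used in the body; C++20 deprecates that implicit form,
        // and GCC/Clang warn about it under -std=c++20. Writing [=, this],
        // which only became legal syntax in C++20, keeps the same semantics.
        return [=, this] { return budget + 1; };
      }
    };

Because the capture-default `=` still copy-captures the enclosing locals, and `this` was already captured by pointer before, the rewrite changes no behavior; each hunk only makes the member access explicit and silences the deprecation warning.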