LOG_PREFIX(Cache::prepare_record);
DEBUGT("enter", t);
+ auto trans_src = t.get_src();
assert(!t.is_weak());
- assert(t.get_src() != Transaction::src_t::READ);
+ assert(trans_src != Transaction::src_t::READ);
auto& efforts = get_by_src(stats.committed_efforts_by_src,
- t.get_src());
+ trans_src);
// Should be valid due to interruptible future
for (auto &i: t.read_set) {
auto& ool_stats = t.get_ool_write_stats();
ceph_assert(ool_stats.extents.num == t.ool_block_list.size());
- // FIXME: prevent submitting empty records
if (record.is_empty()) {
- ERRORT("record is empty!", t);
+ // XXX: improve OSD logic to not submit empty transactions.
+ DEBUGT("record to submit is empty, src={}!", t, trans_src);
assert(t.onode_tree_stats.is_clear());
assert(t.lba_tree_stats.is_clear());
assert(ool_stats.is_clear());
- ++(efforts.num_trans);
- } else {
- DEBUGT("record is ready to submit, src={}, mdsize={}, dsize={}; "
- "{} ool records, mdsize={}, dsize={}, fillness={}",
- t, t.get_src(),
- record.size.get_raw_mdlength(),
- record.size.dlength,
- ool_stats.num_records,
- ool_stats.header_raw_bytes,
- ool_stats.data_bytes,
- ((double)(ool_stats.header_raw_bytes + ool_stats.data_bytes) /
- (ool_stats.header_bytes + ool_stats.data_bytes)));
- if (t.get_src() == Transaction::src_t::CLEANER_TRIM ||
- t.get_src() == Transaction::src_t::CLEANER_RECLAIM) {
- // CLEANER transaction won't contain any onode tree operations
- assert(t.onode_tree_stats.is_clear());
- } else {
- if (t.onode_tree_stats.depth) {
- stats.onode_tree_depth = t.onode_tree_stats.depth;
- }
- get_by_src(stats.committed_onode_tree_efforts, t.get_src()
- ).increment(t.onode_tree_stats);
- }
+ }
- if (t.lba_tree_stats.depth) {
- stats.lba_tree_depth = t.lba_tree_stats.depth;
+ DEBUGT("record is ready to submit, src={}, mdsize={}, dsize={}; "
+ "{} ool records, mdsize={}, dsize={}, fillness={}",
+ t, trans_src,
+ record.size.get_raw_mdlength(),
+ record.size.dlength,
+ ool_stats.num_records,
+ ool_stats.header_raw_bytes,
+ ool_stats.data_bytes,
+ ((double)(ool_stats.header_raw_bytes + ool_stats.data_bytes) /
+ (ool_stats.header_bytes + ool_stats.data_bytes)));
+ if (trans_src == Transaction::src_t::CLEANER_TRIM ||
+ trans_src == Transaction::src_t::CLEANER_RECLAIM) {
+ // CLEANER transaction won't contain any onode tree operations
+ assert(t.onode_tree_stats.is_clear());
+ } else {
+ if (t.onode_tree_stats.depth) {
+ stats.onode_tree_depth = t.onode_tree_stats.depth;
}
- get_by_src(stats.committed_lba_tree_efforts, t.get_src()
- ).increment(t.lba_tree_stats);
+ get_by_src(stats.committed_onode_tree_efforts, trans_src
+ ).increment(t.onode_tree_stats);
+ }
- ++(efforts.num_trans);
- efforts.num_ool_records += ool_stats.num_records;
- efforts.ool_record_padding_bytes +=
- (ool_stats.header_bytes - ool_stats.header_raw_bytes);
- efforts.ool_record_metadata_bytes += ool_stats.header_raw_bytes;
- efforts.ool_record_data_bytes += ool_stats.data_bytes;
- efforts.inline_record_metadata_bytes +=
- (record.size.get_raw_mdlength() - record.get_delta_size());
+ if (t.lba_tree_stats.depth) {
+ stats.lba_tree_depth = t.lba_tree_stats.depth;
}
+ get_by_src(stats.committed_lba_tree_efforts, trans_src
+ ).increment(t.lba_tree_stats);
+
+ ++(efforts.num_trans);
+ efforts.num_ool_records += ool_stats.num_records;
+ efforts.ool_record_padding_bytes +=
+ (ool_stats.header_bytes - ool_stats.header_raw_bytes);
+ efforts.ool_record_metadata_bytes += ool_stats.header_raw_bytes;
+ efforts.ool_record_data_bytes += ool_stats.data_bytes;
+ efforts.inline_record_metadata_bytes +=
+ (record.size.get_raw_mdlength() - record.get_delta_size());
return record;
}
extent_len_t record_size_t::get_raw_mdlength() const
{
- // FIXME: prevent submitting empty records
- // assert(!is_empty());
+ // an empty record is allowed to be submitted
return plain_mdlength +
ceph::encoded_sizeof_bounded<record_header_t>();
}
const record_size_t& rsize,
extent_len_t _block_size)
{
- // FIXME: prevent submitting empty records
- // assert(!rsize.is_empty());
+ // an empty record is allowed to be submitted
assert(_block_size > 0);
assert(rsize.dlength % _block_size == 0);
assert(block_size == 0 || block_size == _block_size);
ceph::os::Transaction t;
meta_coll->create(t);
meta_coll->store_superblock(t, superblock);
+ logger().debug("OSD::_write_superblock: do_transaction...");
return store.do_transaction(meta_coll->collection(), std::move(t));
});
}
superblock.clean_thru = last;
}
meta_coll->store_superblock(t, superblock);
+ logger().debug("OSD::handle_osd_map: do_transaction...");
return store.do_transaction(meta_coll->collection(), std::move(t));
});
}).then([=] {
PG::~PG() {}
bool PG::try_flush_or_schedule_async() {
+ logger().debug("PG::try_flush_or_schedule_async: do_transaction...");
(void)shard_services.get_store().do_transaction(
coll_ref,
ObjectStore::Transaction()).then(
decode(log_entries, p);
peering_state.append_log(std::move(log_entries), req->pg_trim_to,
req->version, req->min_last_complete_ondisk, txn, !txn.empty(), false);
+ logger().debug("PG::handle_rep_op: do_transaction...");
return interruptor::make_interruptible(shard_services.get_store().do_transaction(
coll_ref, std::move(txn))).then_interruptible(
[req, lcod=peering_state.get_info().last_complete, this] {
m.stats,
m.op == MOSDPGBackfill::OP_BACKFILL_PROGRESS,
t);
+ logger().debug("RecoveryBackend::handle_backfill_progress: do_transaction...");
return shard_services.get_store().do_transaction(
pg.get_collection_ref(), std::move(t)).or_terminate();
}
t.remove(pg.get_collection_ref()->get_cid(),
ghobject_t(soid, ghobject_t::NO_GEN, pg.get_pg_whoami().shard));
}
+ logger().debug("RecoveryBackend::handle_backfill_remove: do_transaction...");
return shard_services.get_store().do_transaction(
pg.get_collection_ref(), std::move(t)).or_terminate();
}
bufferlist encoded_txn;
encode(txn, encoded_txn);
+ logger().debug("ReplicatedBackend::_submit_transaction: do_transaction...");
auto all_completed = interruptor::make_interruptible(
shard_services.get_store().do_transaction(coll, std::move(txn)))
.then_interruptible([this, peers=pending_txn->second.weak_from_this()] {
logger().debug("{}", __func__);
ceph::os::Transaction t;
pg.get_recovery_handler()->on_local_recover(soid, _recovery_info, is_delete, t);
+ logger().debug("ReplicatedRecoveryBackend::on_local_recover_persist: do_transaction...");
return interruptor::make_interruptible(
shard_services.get_store().do_transaction(coll, std::move(t)))
.then_interruptible(
[this, lomt = std::move(lomt)](auto& txn) {
return backend->remove(lomt->os, txn).then_interruptible(
[this, &txn]() mutable {
+ logger().debug("ReplicatedRecoveryBackend::local_recover_delete: do_transaction...");
return shard_services.get_store().do_transaction(coll,
std::move(txn));
});
return _handle_pull_response(from, pop, &response, &t).then_interruptible(
[this, &t](bool complete) {
epoch_t epoch_frozen = pg.get_osdmap_epoch();
+ logger().debug("ReplicatedRecoveryBackend::handle_pull_response: do_transaction...");
return shard_services.get_store().do_transaction(coll, std::move(t))
.then([this, epoch_frozen, complete,
last_complete = pg.get_info().last_complete] {
return _handle_push(m->from, pop, &response, &t).then_interruptible(
[this, &t] {
epoch_t epoch_frozen = pg.get_osdmap_epoch();
+ logger().debug("ReplicatedRecoveryBackend::handle_push: do_transaction...");
return interruptor::make_interruptible(
shard_services.get_store().do_transaction(coll, std::move(t))).then_interruptible(
[this, epoch_frozen, last_complete = pg.get_info().last_complete] {
seastar::future<> ShardServices::dispatch_context_transaction(
crimson::os::CollectionRef col, PeeringCtx &ctx) {
+ logger().debug("ShardServices::dispatch_context_transaction: do_transaction...");
auto ret = store.do_transaction(
col,
std::move(ctx.transaction));