Reduce logging overhead by merging consecutive string-literal operands so each
statement issues fewer "<<" stream-insertion calls.
Signed-off-by: Yin Congmin <congmin.yin@intel.com>
}
friend std::ostream& operator<< (std::ostream& os, const BlockExtent& block_extent) {
- os << "[block_start = " << block_extent.block_start << ", "
- << "block_end = " << block_extent.block_end << ")";
+ os << "[block_start=" << block_extent.block_start
+ << ", block_end=" << block_extent.block_end << "]";
return os;
}
};
int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
BlockGuardCell **cell) {
std::lock_guard locker{m_lock};
- ldout(m_cct, 20) << block_extent << ", "
- << "free_slots=" << m_free_detained_block_extents.size()
+ ldout(m_cct, 20) << block_extent
+ << ", free_slots="
+ << m_free_detained_block_extents.size()
<< dendl;
DetainedBlockExtent *detained_block_extent;
ceph_assert(cell != nullptr);
auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
*cell);
- ldout(m_cct, 20) << detained_block_extent.block_extent << ", "
- << "pending_ops="
+ ldout(m_cct, 20) << detained_block_extent.block_extent
+ << ", pending_ops="
<< detained_block_extent.block_operations.size()
<< dendl;
C_ReadRequest *read_ctx = m_builder->create_read_request(
cct, now, m_perfcounter, bl, on_finish);
ldout(cct, 20) << "name: " << m_image_ctx.name << " id: " << m_image_ctx.id
- << "image_extents=" << image_extents << ", "
- << "bl=" << bl << ", "
- << "on_finish=" << on_finish << dendl;
+ << ", image_extents=" << image_extents
+ << ", bl=" << bl
+ << ", on_finish=" << on_finish << dendl;
ceph_assert(m_initialized);
bl->clear();
}
}
- ldout(cct, 20) << "miss_extents=" << read_ctx->miss_extents << ", "
- << "miss_bl=" << read_ctx->miss_bl << dendl;
+ ldout(cct, 20) << "miss_extents=" << read_ctx->miss_extents
+ << ", miss_bl=" << read_ctx->miss_bl << dendl;
complete_read(log_entries_to_read, bls_to_read, ctx);
}
std::advance(last_in_batch, ops_to_append);
ops.splice(ops.end(), m_ops_to_append, m_ops_to_append.begin(), last_in_batch);
ops_remain = true; /* Always check again before leaving */
- ldout(m_image_ctx.cct, 20) << "appending " << ops.size() << ", "
- << m_ops_to_append.size() << " remain" << dendl;
+ ldout(m_image_ctx.cct, 20) << "appending " << ops.size() << ", remain "
+ << m_ops_to_append.size() << dendl;
} else if (isRWL) {
ops_remain = false;
if (appending) {
<< invalidate << ")" << dendl;
if (m_log_entries.size()) {
ldout(m_image_ctx.cct, 1) << "m_log_entries.size()="
- << m_log_entries.size() << ", "
- << "front()=" << *m_log_entries.front()
+ << m_log_entries.size()
+ << ", front()=" << *m_log_entries.front()
<< dendl;
}
if (invalidate) {
namespace pwl {
std::ostream& GenericLogEntry::format(std::ostream &os) const {
- os << "ram_entry=[" << ram_entry << "], "
- << "cache_entry=" << (void*)cache_entry << ", "
- << "log_entry_index=" << log_entry_index << ", "
- << "completed=" << completed;
+ os << "ram_entry=[" << ram_entry
+ << "], cache_entry=" << (void*)cache_entry
+ << ", log_entry_index=" << log_entry_index
+ << ", completed=" << completed;
return os;
}
std::ostream& SyncPointLogEntry::format(std::ostream &os) const {
os << "(Sync Point) ";
GenericLogEntry::format(os);
- os << ", "
- << "writes=" << writes << ", "
- << "bytes=" << bytes << ", "
- << "writes_completed=" << writes_completed << ", "
- << "writes_flushed=" << writes_flushed << ", "
- << "prior_sync_point_flushed=" << prior_sync_point_flushed << ", "
- << "next_sync_point_entry=" << next_sync_point_entry;
+ os << ", writes=" << writes
+ << ", bytes=" << bytes
+ << ", writes_completed=" << writes_completed
+ << ", writes_flushed=" << writes_flushed
+ << ", prior_sync_point_flushed=" << prior_sync_point_flushed
+ << ", next_sync_point_entry=" << next_sync_point_entry;
return os;
}
std::ostream& GenericWriteLogEntry::format(std::ostream &os) const {
GenericLogEntry::format(os);
- os << ", "
- << "sync_point_entry=[";
+ os << ", sync_point_entry=[";
if (sync_point_entry) {
os << *sync_point_entry;
} else {
os << "nullptr";
}
- os << "], "
- << "referring_map_entries=" << referring_map_entries;
+ os << "], referring_map_entries=" << referring_map_entries;
return os;
}
std::ostream& WriteLogEntry::format(std::ostream &os) const {
os << "(Write) ";
GenericWriteLogEntry::format(os);
- os << ", "
- << "cache_buffer=" << (void*)cache_buffer << ", ";
- os << "cache_bp=" << cache_bp << ", ";
- os << "bl_refs=" << bl_refs;
+ os << ", cache_buffer=" << (void*)cache_buffer;
+ os << ", cache_bp=" << cache_bp;
+ os << ", bl_refs=" << bl_refs;
return os;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
LogMapEntry<T> &e) {
- os << "block_extent=" << e.block_extent << ", "
- << "log_entry=[" << e.log_entry << "]";
+ os << "block_extent=" << e.block_extent
+ << ", log_entry=[" << e.log_entry << "]";
return os;
}
}
std::ostream& GenericLogOperation::format(std::ostream &os) const {
- os << "dispatch_time=[" << dispatch_time << "], "
- << "buf_persist_start_time=[" << buf_persist_start_time << "], "
- << "buf_persist_comp_time=[" << buf_persist_comp_time << "], "
- << "log_append_start_time=[" << log_append_start_time << "], "
- << "log_append_comp_time=[" << log_append_comp_time << "], ";
+ os << "dispatch_time=[" << dispatch_time
+ << "], buf_persist_start_time=[" << buf_persist_start_time
+ << "], buf_persist_comp_time=[" << buf_persist_comp_time
+ << "], log_append_start_time=[" << log_append_start_time
+ << "], log_append_comp_time=[" << log_append_comp_time << "]";
return os;
}
std::ostream &SyncPointLogOperation::format(std::ostream &os) const {
os << "(Sync Point) ";
GenericLogOperation::format(os);
- os << ", "
- << "sync_point=[" << *sync_point << "]";
+ os << ", sync_point=[" << *sync_point << "]";
return os;
}
std::string op_name = is_writesame ? "(Write Same) " : "(Write) ";
os << op_name;
GenericWriteLogOperation::format(os);
- os << ", ";
if (log_entry) {
- os << "log_entry=[" << *log_entry << "], ";
+ os << ", log_entry=[" << *log_entry << "]";
} else {
- os << "log_entry=nullptr, ";
+ os << ", log_entry=nullptr";
}
- os << "bl=[" << bl << "],"
- << "buffer_alloc=" << buffer_alloc;
+ os << ", bl=[" << bl << "], buffer_alloc=" << buffer_alloc;
return os;
}
std::ostream &operator<<(std::ostream &os,
const WriteLogOperationSet &s) {
- os << "cell=" << (void*)s.cell << ", "
- << "extent_ops_appending=[" << s.extent_ops_appending << ", "
- << "extent_ops_persist=[" << s.extent_ops_persist << "]";
+ os << "cell=" << (void*)s.cell
+ << ", extent_ops_appending=" << s.extent_ops_appending
+ << ", extent_ops_persist=" << s.extent_ops_persist;
return os;
}
std::ostream &DiscardLogOperation::format(std::ostream &os) const {
os << "(Discard) ";
GenericWriteLogOperation::format(os);
- os << ", ";
if (log_entry) {
- os << "log_entry=[" << *log_entry << "], ";
+ os << ", log_entry=[" << *log_entry << "]";
} else {
- os << "log_entry=nullptr, ";
+ os << ", log_entry=nullptr";
}
return os;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_BlockIORequest<T> &req) {
- os << "image_extents=" << req.image_extents << ","
- << " image_extents_summary=[" << req.image_extents_summary << "],"
- << " bl=" << req.bl << ","
- << " user_req=" << req.user_req << ","
- << " m_user_req_completed=" << req.m_user_req_completed << ","
- << " m_deferred=" << req.m_deferred << ","
- << " detained=" << req.detained;
+ os << "image_extents=" << req.image_extents
+ << ", image_extents_summary=[" << req.image_extents_summary
+ << "], bl=" << req.bl
+ << ", user_req=" << req.user_req
+ << ", m_user_req_completed=" << req.m_user_req_completed
+ << ", m_deferred=" << req.m_deferred
+ << ", detained=" << req.detained;
return os;
}
os << (C_BlockIORequest<T>&)req
<< " m_resources.allocated=" << req.m_resources.allocated;
if (req.op_set) {
- os << " op_set=" << *req.op_set;
+ os << " op_set=[" << *req.op_set << "]";
}
return os;
}
current_sync_point,
pwl.get_persist_on_flush(),
pwl.get_context(), this);
- ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
- << " op_set=" << op_set.get() << dendl;
+ ldout(pwl.get_context(), 20) << "write_req=[" << *this
+ << "], op_set=" << op_set.get() << dendl;
ceph_assert(m_resources.allocated);
/* op_set->operations initialized differently for plain write or write same */
auto allocation = m_resources.buffers.begin();
this->op_set->operations.emplace_back(operation);
/* A WS is also a write */
- ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
- << " op_set=" << op_set.get()
- << " operation=" << operation << dendl;
+ ldout(pwl.get_context(), 20) << "write_req=[" << *this
+ << "], op_set=" << op_set.get()
+ << ", operation=" << operation << dendl;
log_entries.emplace_back(operation->log_entry);
if (!op_set->persist_on_flush) {
pwl.inc_last_op_sequence_num();
template <typename T>
bool C_FlushRequest<T>::alloc_resources() {
- ldout(pwl.get_context(), 20) << "req type=" << get_name() << " "
- << "req=[" << *this << "]" << dendl;
+ ldout(pwl.get_context(), 20) << "req type=" << get_name()
+ << " req=[" << *this << "]" << dendl;
return pwl.alloc_resources(this);
}
template <typename T>
void C_FlushRequest<T>::dispatch() {
utime_t now = ceph_clock_now();
- ldout(pwl.get_context(), 20) << "req type=" << get_name() << " "
- << "req=[" << *this << "]" << dendl;
+ ldout(pwl.get_context(), 20) << "req type=" << get_name()
+ << " req=[" << *this << "]" << dendl;
ceph_assert(this->m_resources.allocated);
this->m_dispatched_time = now;
template <typename T>
bool C_DiscardRequest<T>::alloc_resources() {
- ldout(pwl.get_context(), 20) << "req type=" << get_name() << " "
- << "req=[" << *this << "]" << dendl;
+ ldout(pwl.get_context(), 20) << "req type=" << get_name()
+ << " req=[" << *this << "]" << dendl;
return pwl.alloc_resources(this);
}
template <typename T>
void C_DiscardRequest<T>::dispatch() {
utime_t now = ceph_clock_now();
- ldout(pwl.get_context(), 20) << "req type=" << get_name() << " "
- << "req=[" << *this << "]" << dendl;
+ ldout(pwl.get_context(), 20) << "req type=" << get_name()
+ << " req=[" << *this << "]" << dendl;
ceph_assert(this->m_resources.allocated);
this->m_dispatched_time = now;
setup_log_operations();
bool queued = false; /* Queued for barrier */
friend std::ostream &operator<<(std::ostream &os,
const BlockGuardReqState &r) {
- os << "barrier=" << r.barrier << ", "
- << "current_barrier=" << r.current_barrier << ", "
- << "detained=" << r.detained << ", "
- << "queued=" << r.queued;
+ os << "barrier=" << r.barrier
+ << ", current_barrier=" << r.current_barrier
+ << ", detained=" << r.detained
+ << ", queued=" << r.queued;
return os;
}
};
}
friend std::ostream &operator<<(std::ostream &os,
const GuardedRequest &r) {
- os << "guard_ctx->state=[" << r.guard_ctx->state << "], "
- << "block_extent.block_start=" << r.block_extent.block_start << ", "
- << "block_extent.block_start=" << r.block_extent.block_end;
+ os << "guard_ctx->state=[" << r.guard_ctx->state
+ << "], block_extent.block_start=" << r.block_extent.block_start
+ << ", block_extent.block_end=" << r.block_extent.block_end;
return os;
}
};
std::ostream &operator<<(std::ostream &os,
const SyncPoint &p) {
- os << "log_entry=[" << *p.log_entry << "], "
- << "earlier_sync_point=" << p.earlier_sync_point << ", "
- << "later_sync_point=" << p.later_sync_point << ", "
- << "m_final_op_sequence_num=" << p.m_final_op_sequence_num << ", "
- << "m_prior_log_entries_persisted=" << p.m_prior_log_entries_persisted << ", "
- << "m_prior_log_entries_persisted_complete=" << p.m_prior_log_entries_persisted_complete << ", "
- << "m_append_scheduled=" << p.m_append_scheduled << ", "
- << "appending=" << p.appending << ", "
- << "on_sync_point_appending=" << p.on_sync_point_appending.size() << ", "
- << "on_sync_point_persisted=" << p.on_sync_point_persisted.size();
+ os << "log_entry=[" << *p.log_entry
+ << "], earlier_sync_point=" << p.earlier_sync_point
+ << ", later_sync_point=" << p.later_sync_point
+ << ", m_final_op_sequence_num=" << p.m_final_op_sequence_num
+ << ", m_prior_log_entries_persisted=" << p.m_prior_log_entries_persisted
+ << ", m_prior_log_entries_persisted_complete=" << p.m_prior_log_entries_persisted_complete
+ << ", m_append_scheduled=" << p.m_append_scheduled
+ << ", appending=" << p.appending
+ << ", on_sync_point_appending=" << p.on_sync_point_appending.size()
+ << ", on_sync_point_persisted=" << p.on_sync_point_persisted.size();
return os;
}
std::ostream& operator<<(std::ostream& os,
const WriteLogCacheEntry &entry) {
- os << "entry_valid=" << (bool)entry.entry_valid << ", "
- << "sync_point=" << (bool)entry.sync_point << ", "
- << "sequenced=" << (bool)entry.sequenced << ", "
- << "has_data=" << (bool)entry.has_data << ", "
- << "discard=" << (bool)entry.discard << ", "
- << "writesame=" << (bool)entry.writesame << ", "
- << "sync_gen_number=" << entry.sync_gen_number << ", "
- << "write_sequence_number=" << entry.write_sequence_number << ", "
- << "image_offset_bytes=" << entry.image_offset_bytes << ", "
- << "write_bytes=" << entry.write_bytes << ", "
- << "ws_datalen=" << entry.ws_datalen << ", "
- << "entry_index=" << entry.entry_index;
+ os << "entry_valid=" << (bool)entry.entry_valid
+ << ", sync_point=" << (bool)entry.sync_point
+ << ", sequenced=" << (bool)entry.sequenced
+ << ", has_data=" << (bool)entry.has_data
+ << ", discard=" << (bool)entry.discard
+ << ", writesame=" << (bool)entry.writesame
+ << ", sync_gen_number=" << entry.sync_gen_number
+ << ", write_sequence_number=" << entry.write_sequence_number
+ << ", image_offset_bytes=" << entry.image_offset_bytes
+ << ", write_bytes=" << entry.write_bytes
+ << ", ws_datalen=" << entry.ws_datalen
+ << ", entry_index=" << entry.entry_index;
return os;
}
explicit ExtentsSummary(const ExtentsType &extents);
friend std::ostream &operator<<(std::ostream &os,
const ExtentsSummary &s) {
- os << "total_bytes=" << s.total_bytes << ", "
- << "first_image_byte=" << s.first_image_byte << ", "
- << "last_image_byte=" << s.last_image_byte;
+ os << "total_bytes=" << s.total_bytes
+ << ", first_image_byte=" << s.first_image_byte
+ << ", last_image_byte=" << s.last_image_byte;
return os;
}
BlockExtent block_extent() {
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
- << " cmp_bl=" << req.cmp_bl << ","
- << " read_bl=" << req.read_bl << ","
- << " compare_succeeded=" << req.compare_succeeded << ","
- << " mismatch_offset=" << req.mismatch_offset;
+ << " cmp_bl=" << req.cmp_bl
+ << ", read_bl=" << req.read_bl
+ << ", compare_succeeded=" << req.compare_succeeded
+ << ", mismatch_offset=" << req.mismatch_offset;
return os;
}
}
}
ldout(m_image_ctx.cct, 20) << "Copying entry for operation at index="
- << operation->get_log_entry()->log_entry_index << " "
- << "from " << &operation->get_log_entry()->ram_entry << " "
- << "to " << operation->get_log_entry()->cache_entry << " "
- << "operation=[" << *operation << "]" << dendl;
+ << operation->get_log_entry()->log_entry_index
+ << " from " << &operation->get_log_entry()->ram_entry
+ << " to " << operation->get_log_entry()->cache_entry
+ << " operation=[" << *operation << "]" << dendl;
operation->log_append_start_time = now;
*operation->get_log_entry()->cache_entry = operation->get_log_entry()->ram_entry;
ldout(m_image_ctx.cct, 20) << "APPENDING: index="
- << operation->get_log_entry()->log_entry_index << " "
- << "pmem_entry=[" << *operation->get_log_entry()->cache_entry
+ << operation->get_log_entry()->log_entry_index
+ << " pmem_entry=[" << *operation->get_log_entry()->cache_entry
<< "]" << dendl;
entries_to_flush.push_back(operation);
}
ceph_assert(ops.front()->get_log_entry()->cache_entry < ops.back()->get_log_entry()->cache_entry);
}
- ldout(m_image_ctx.cct, 20) << "entry count=" << ops.size() << " "
- << "start address="
- << ops.front()->get_log_entry()->cache_entry << " "
- << "bytes="
+ ldout(m_image_ctx.cct, 20) << "entry count=" << ops.size()
+ << " start address="
+ << ops.front()->get_log_entry()->cache_entry
+ << " bytes="
<< ops.size() * sizeof(*(ops.front()->get_log_entry()->cache_entry))
<< dendl;
pmemobj_flush(m_log_pool,
std::advance(last_in_batch, ops_to_flush);
ops.splice(ops.end(), m_ops_to_flush, m_ops_to_flush.begin(), last_in_batch);
ops_remain = !m_ops_to_flush.empty();
- ldout(m_image_ctx.cct, 20) << "flushing " << ops.size() << ", "
- << m_ops_to_flush.size() << " remain" << dendl;
+ ldout(m_image_ctx.cct, 20) << "flushing " << ops.size() << ", remain "
+ << m_ops_to_flush.size() << dendl;
} else {
ops_remain = false;
}
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
- << " cmp_bl=" << req.cmp_bl << ","
- << " read_bl=" << req.read_bl << ","
- << " compare_succeeded=" << req.compare_succeeded << ","
- << " mismatch_offset=" << req.mismatch_offset;
+ << " cmp_bl=" << req.cmp_bl
+ << ", read_bl=" << req.read_bl
+ << ", compare_succeeded=" << req.compare_succeeded
+ << ", mismatch_offset=" << req.mismatch_offset;
return os;
}
this->m_bytes_cached -= cached_bytes;
ldout(m_image_ctx.cct, 20)
- << "Finished root update: " << "initial_first_valid_entry="
- << initial_first_valid_entry << ", " << "m_first_valid_entry="
- << m_first_valid_entry << "," << "release space = "
- << allocated_bytes << "," << "m_bytes_allocated="
- << m_bytes_allocated << "," << "release cached space="
- << cached_bytes << "," << "m_bytes_cached="
+ << "Finished root update: initial_first_valid_entry="
+ << initial_first_valid_entry << ", m_first_valid_entry="
+ << m_first_valid_entry << ", release space="
+ << allocated_bytes << ", m_bytes_allocated="
+ << m_bytes_allocated << ", release cached space="
+ << cached_bytes << ", m_bytes_cached="
<< this->m_bytes_cached << dendl;
this->m_alloc_failed_since_retire = false;