Update outdated comments and remove comments that are no longer needed.
Remove redundant ";", a redundant debug log, and redundant "" at the end of
log messages. Also make error messages start in lowercase and include error
details where they were missing.
Signed-off-by: Yin Congmin <congmin.yin@intel.com>
ceph_assert(kv.first == m_current_sync_gen+1);
init_flush_new_sync_point(later);
ceph_assert(kv.first == m_current_sync_gen);
- sync_point_entries[kv.first] = m_current_sync_point->log_entry;;
+ sync_point_entries[kv.first] = m_current_sync_point->log_entry;
}
ldout(cct, 5) << "There's an existing pool file " << m_log_pool_name
<< ", While there's no cache in the image metatata." << dendl;
if (remove(m_log_pool_name.c_str()) != 0) {
- lderr(cct) << "Failed to remove the pool file " << m_log_pool_name
+ lderr(cct) << "failed to remove the pool file " << m_log_pool_name
<< dendl;
on_finish->complete(-errno);
return;
}
} else if ((m_cache_state->present) &&
(access(m_log_pool_name.c_str(), F_OK) != 0)) {
- ldout(cct, 5) << "Can't find the existed pool file " << m_log_pool_name << dendl;
+ lderr(cct) << "can't find the existed pool file: " << m_log_pool_name
+ << ". error: " << cpp_strerror(-errno) << dendl;
on_finish->complete(-errno);
return;
}
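For context, remove() and access() report failures through errno, which is
why the callback above is completed with -errno. A minimal standalone sketch
of this pattern, using a hypothetical remove_pool_file() helper rather than
the actual PWL code:

  #include <cerrno>
  #include <cstdio>
  #include <unistd.h>

  // Same convention as above: return 0 on success, -errno on failure,
  // matching how on_finish->complete(-errno) propagates the error.
  static int remove_pool_file(const char *pool_path) {
    if (access(pool_path, F_OK) != 0) {
      return -errno;  // e.g. -ENOENT when the pool file is missing
    }
    if (remove(pool_path) != 0) {
      return -errno;  // e.g. -EACCES on a permission failure
    }
    return 0;
  }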
ceph_assert(m_initialized);
- /* Split images because PMDK's space management is not perfect, there are
- * fragment problems. The larger the block size difference of the block,
- * the easier the fragmentation problem will occur, resulting in the
- * remaining space can not be allocated in large size. We plan to manage
- * pmem space and allocation by ourselves in the future.
+ /* Split image extents larger than 1M. This isn't strictly necessary but
+ * makes the libpmemobj allocator's job easier and reduces pmemobj_defrag() cost.
+ * We plan to manage pmem space and allocation by ourselves in the future.
*/
Extents split_image_extents;
uint64_t max_extent_size = get_max_extent();
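The splitting loop itself falls outside this hunk. A minimal sketch of the
idea, assuming Extents is a vector of {offset, length} pairs and
max_extent_size caps each chunk (1M for the RWL case):

  #include <cstdint>
  #include <utility>
  #include <vector>

  using Extents = std::vector<std::pair<uint64_t, uint64_t>>;  // {offset, length}

  // Chop any extent larger than max_extent_size into max_extent_size pieces
  // so the pmem allocator only ever sees bounded allocation sizes.
  Extents split_extents(const Extents &image_extents, uint64_t max_extent_size) {
    Extents split;
    for (auto [offset, len] : image_extents) {
      while (len > max_extent_size) {
        split.emplace_back(offset, max_extent_size);
        offset += max_extent_size;
        len -= max_extent_size;
      }
      if (len > 0) {
        split.emplace_back(offset, len);
      }
    }
    return split;
  }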
template <typename I>
void AbstractWriteLog<I>::check_image_cache_state_clean() {
ceph_assert(m_deferred_ios.empty());
- ceph_assert(m_ops_to_append.empty());;
+ ceph_assert(m_ops_to_append.empty());
ceph_assert(m_async_flush_ops == 0);
ceph_assert(m_async_append_ops == 0);
ceph_assert(m_dirty_log_entries.empty());
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_BlockIORequest<T> &req) {
- os << "image_extents=[" << req.image_extents << "], "
- << "image_extents_summary=[" << req.image_extents_summary << "], "
- << "bl=" << req.bl << ", "
- << "user_req=" << req.user_req << ", "
- << "m_user_req_completed=" << req.m_user_req_completed << ", "
- << "m_deferred=" << req.m_deferred << ", "
- << "detained=" << req.detained << ", "
- << "waited_lanes=" << req.waited_lanes << ", "
- << "waited_entries=" << req.waited_entries << ", "
- << "waited_buffers=" << req.waited_buffers << "";
+ os << "image_extents=" << req.image_extents << ","
+ << " image_extents_summary=[" << req.image_extents_summary << "],"
+ << " bl=" << req.bl << ","
+ << " user_req=" << req.user_req << ","
+ << " m_user_req_completed=" << req.m_user_req_completed << ","
+ << " m_deferred=" << req.m_deferred << ","
+ << " detained=" << req.detained << ","
+ << " waited_lanes=" << req.waited_lanes << ","
+ << " waited_entries=" << req.waited_entries << ","
+ << " waited_buffers=" << req.waited_buffers;
return os;
}
os << (C_BlockIORequest<T>&)req
<< " m_resources.allocated=" << req.m_resources.allocated;
if (req.op_set) {
- os << "op_set=" << *req.op_set;
+ os << " op_set=" << *req.op_set;
}
return os;
}
current_sync_point,
pwl.get_persist_on_flush(),
pwl.get_context(), this);
- ldout(pwl.get_context(), 20) << "write_req=" << *this << " op_set=" << op_set.get() << dendl;
+ ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
+ << " op_set=" << op_set.get() << dendl;
ceph_assert(m_resources.allocated);
/* op_set->operations initialized differently for plain write or write same */
auto allocation = m_resources.buffers.begin();
this->op_set->operations.emplace_back(operation);
/* A WS is also a write */
- ldout(pwl.get_context(), 20) << "write_req=" << *this << " op_set=" << op_set.get()
+ ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
+ << " op_set=" << op_set.get()
<< " operation=" << operation << dendl;
log_entries.emplace_back(operation->log_entry);
if (!op_set->persist_on_flush) {
<< "m_append_scheduled=" << p.m_append_scheduled << ", "
<< "appending=" << p.appending << ", "
<< "on_sync_point_appending=" << p.on_sync_point_appending.size() << ", "
- << "on_sync_point_persisted=" << p.on_sync_point_persisted.size() << "";
+ << "on_sync_point_persisted=" << p.on_sync_point_persisted.size();
return os;
}
#ifdef WITH_RBD_RWL
union {
struct {
- uint8_t layout_version; /* Version of this structure (RWL_LAYOUT_VERSION) */
+ uint8_t layout_version;
};
uint64_t _u64;
} header;
#endif
#ifdef WITH_RBD_SSD_CACHE
uint64_t layout_version = 0;
- uint64_t cur_sync_gen = 0;
+ uint64_t cur_sync_gen = 0; /* TODO: remove it when changing disk format */
#endif
uint64_t pool_size;
- uint64_t flushed_sync_gen; /* All writing entries with this or a lower
- * sync gen number are flushed. */
- uint32_t block_size; /* block size */
+ uint64_t flushed_sync_gen; /* All writing entries with this or a lower
+ * sync gen number are flushed. */
+ uint32_t block_size;
uint32_t num_log_entries;
- uint64_t first_free_entry; /* Entry following the newest valid entry */
- uint64_t first_valid_entry; /* Index of the oldest valid entry in the log */
+ uint64_t first_free_entry; /* The free entry following the latest valid
+ * entry; the next log entry will be written here */
+ uint64_t first_valid_entry; /* The oldest valid entry to be retired */
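first_free_entry and first_valid_entry together describe a ring buffer over
num_log_entries slots: entries are appended at first_free_entry and retired
from first_valid_entry, both advancing modulo the log size. A hypothetical
LogRing sketch of that bookkeeping (the real code also tracks the free-entry
count in a separate counter):

  #include <cstdint>

  struct LogRing {
    uint64_t num_log_entries;        // total slots in the on-media log
    uint64_t first_free_entry = 0;   // slot the next log entry is written to
    uint64_t first_valid_entry = 0;  // oldest entry not yet retired

    // Number of valid (not yet retired) entries currently in the log.
    uint64_t in_use() const {
      return (first_free_entry + num_log_entries - first_valid_entry) %
             num_log_entries;
    }
    void append() { first_free_entry = (first_free_entry + 1) % num_log_entries; }
    void retire() { first_valid_entry = (first_valid_entry + 1) % num_log_entries; }
  };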
#ifdef WITH_RBD_SSD_CACHE
DENC(WriteLogPoolRoot, v, p) {
const ExtentsSummary &s) {
os << "total_bytes=" << s.total_bytes << ", "
<< "first_image_byte=" << s.first_image_byte << ", "
- << "last_image_byte=" << s.last_image_byte << "";
+ << "last_image_byte=" << s.last_image_byte;
return os;
}
BlockExtent block_extent() {
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
- << "cmp_bl=" << req.cmp_bl << ", "
- << "read_bl=" << req.read_bl << ", "
- << "compare_succeeded=" << req.compare_succeeded << ", "
- << "mismatch_offset=" << req.mismatch_offset;
+ << " cmp_bl=" << req.cmp_bl << ","
+ << " read_bl=" << req.read_bl << ","
+ << " compare_succeeded=" << req.compare_succeeded << ","
+ << " mismatch_offset=" << req.mismatch_offset;
return os;
}
<< "from " << &operation->get_log_entry()->ram_entry << " "
<< "to " << operation->get_log_entry()->cache_entry << " "
<< "operation=[" << *operation << "]" << dendl;
- ldout(m_image_ctx.cct, 05) << "APPENDING: index="
- << operation->get_log_entry()->log_entry_index << " "
- << "operation=[" << *operation << "]" << dendl;
operation->log_append_start_time = now;
*operation->get_log_entry()->cache_entry = operation->get_log_entry()->ram_entry;
ldout(m_image_ctx.cct, 20) << "APPENDING: index="
this->m_pwl_pool_layout_name,
this->m_log_pool_size,
(S_IWUSR | S_IRUSR))) == NULL) {
- lderr(cct) << "failed to create pool (" << this->m_log_pool_name << ")"
- << pmemobj_errormsg() << dendl;
+ lderr(cct) << "failed to create pool: " << this->m_log_pool_name
+ << ". error: " << pmemobj_errormsg() << dendl;
m_cache_state->present = false;
m_cache_state->clean = true;
m_cache_state->empty = true;
} TX_ONABORT {
this->m_total_log_entries = 0;
this->m_free_log_entries = 0;
- lderr(cct) << "failed to initialize pool (" << this->m_log_pool_name << ")" << dendl;
+ lderr(cct) << "failed to initialize pool: " << this->m_log_pool_name
+ << ". pmemobj TX errno: " << pmemobj_tx_errno() << dendl;
r = -pmemobj_tx_errno();
goto err_close_pool;
} TX_FINALLY {
pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
if (D_RO(pool_root)->header.layout_version != RWL_LAYOUT_VERSION) {
// TODO: will handle upgrading version in the future
- lderr(cct) << "Pool layout version is "
+ lderr(cct) << "pool layout version is "
<< D_RO(pool_root)->header.layout_version
<< " expected " << RWL_LAYOUT_VERSION << dendl;
goto err_close_pool;
}
if (D_RO(pool_root)->block_size != MIN_WRITE_ALLOC_SIZE) {
- lderr(cct) << "Pool block size is " << D_RO(pool_root)->block_size
+ lderr(cct) << "pool block size is " << D_RO(pool_root)->block_size
<< " expected " << MIN_WRITE_ALLOC_SIZE << dendl;
goto err_close_pool;
}
this->can_retire_entry(m_log_entries.front())) {
auto entry = m_log_entries.front();
if (entry->log_entry_index != first_valid_entry) {
- lderr(cct) << "Retiring entry index (" << entry->log_entry_index
+ lderr(cct) << "retiring entry index (" << entry->log_entry_index
<< ") and first valid log entry index (" << first_valid_entry
<< ") must be ==." << dendl;
}
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
- << "cmp_bl=" << req.cmp_bl << ", "
- << "read_bl=" << req.read_bl << ", "
- << "compare_succeeded=" << req.compare_succeeded << ", "
- << "mismatch_offset=" << req.mismatch_offset;
+ << " cmp_bl=" << req.cmp_bl << ","
+ << " read_bl=" << req.read_bl << ","
+ << " compare_succeeded=" << req.compare_succeeded << ","
+ << " mismatch_offset=" << req.mismatch_offset;
return os;
}
::IOContext ioctx(cct, nullptr);
r = bdev->read(0, MIN_WRITE_ALLOC_SSD_SIZE, &bl, &ioctx, false);
if (r < 0) {
- lderr(cct) << "Read ssd cache superblock failed " << dendl;
+ lderr(cct) << "read ssd cache superblock failed " << dendl;
goto err_close_bdev;
}
auto p = bl.cbegin();
<< dendl;
ceph_assert(is_valid_pool_root(pool_root));
if (pool_root.layout_version != SSD_LAYOUT_VERSION) {
- lderr(cct) << "Pool layout version is "
+ lderr(cct) << "pool layout version is "
<< pool_root.layout_version
<< " expected " << SSD_LAYOUT_VERSION
<< dendl;
goto err_close_bdev;
}
if (pool_root.block_size != MIN_WRITE_ALLOC_SSD_SIZE) {
- lderr(cct) << "Pool block size is " << pool_root.block_size
+ lderr(cct) << "pool block size is " << pool_root.block_size
<< " expected " << MIN_WRITE_ALLOC_SSD_SIZE
<< dendl;
goto err_close_bdev;
GenericLogOperations ops;
ldout(m_image_ctx.cct, 20) << dendl;
- bool ops_remain = false; //no-op variable for SSD
- bool appending = false; //no-op variable for SSD
+ bool ops_remain = false; // unused, no-op variable for SSD
+ bool appending = false; // unused, no-op variable for SSD
this->append_scheduled(ops, ops_remain, appending);
if (ops.size()) {
});
Context *append_ctx = new LambdaContext([this, ctx](int r) {
ldout(m_image_ctx.cct, 15) << "Finish the update of pool root." << dendl;
- bool need_finisher = false;;
+ bool need_finisher = false;
assert(r == 0);
{
std::lock_guard locker(m_lock);