// NOTE(review): hunk — periodic_stats() previously refreshed the image cache
// state while holding m_lock (and the old update_image_cache_state() also
// persisted it under that lock). New shape: refresh counters inside a scoped
// m_lock block, then persist via write_image_cache_state() AFTER the lock is
// released, so the (potentially slow) persist never runs under m_lock.
template <typename I>
void AbstractWriteLog<I>::periodic_stats() {
- std::lock_guard locker(m_lock);
- update_image_cache_state();
+ {
+ std::lock_guard locker(m_lock);
+ update_image_cache_state();
+ }
+ write_image_cache_state();
// NOTE(review): the STATS ldout stream below is cut off by the diff (no
// terminating << dendl; visible) — remaining fields are elided context.
ldout(m_image_ctx.cct, 5) << "STATS: m_log_entries=" << m_log_entries.size()
<< ", m_dirty_log_entries=" << m_dirty_log_entries.size()
<< ", m_free_log_entries=" << m_free_log_entries
}
// NOTE(review): hunk — the old no-arg update_image_cache_state() overload
// (which built a callback and recursed into the Context* overload) is
// renamed to write_image_cache_state(). It now ONLY handles persistence:
// build the handle_write_image_cache_state completion and hand it to
// m_cache_state->write_image_cache_state(). Counter refresh lives in the
// separate update_image_cache_state() (see the next hunk). Callers are
// expected NOT to hold m_lock here.
template <typename I>
-void AbstractWriteLog<I>::update_image_cache_state() {
+void AbstractWriteLog<I>::write_image_cache_state() {
using klass = AbstractWriteLog<I>;
Context *ctx = util::create_context_callback<
- klass, &klass::handle_update_image_cache_state>(this);
- update_image_cache_state(ctx);
+ klass, &klass::handle_write_image_cache_state>(this);
+ m_cache_state->write_image_cache_state(ctx);
}
// NOTE(review): hunk — update_image_cache_state(Context*) loses its
// parameter and its persistence side effect. Post-patch it only snapshots
// the read hit/miss perf counters into m_cache_state, and (per the
// ceph_assert) must be called with m_lock held. The removed
// m_cache_state->write_image_cache_state(on_finish) call — the part that
// did I/O under m_lock — moves to the new write_image_cache_state().
template <typename I>
-void AbstractWriteLog<I>::update_image_cache_state(Context *on_finish) {
+void AbstractWriteLog<I>::update_image_cache_state() {
ldout(m_image_ctx.cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
m_cache_state->hit_bytes = m_perfcounter->get(l_librbd_pwl_rd_hit_bytes);
// miss_bytes is derived: total read bytes minus hit bytes.
m_cache_state->miss_bytes = m_perfcounter->get(l_librbd_pwl_rd_bytes) -
m_cache_state->hit_bytes;
- m_cache_state->write_image_cache_state(on_finish);
}
// NOTE(review): hunk — handle_update_image_cache_state(int) is renamed to
// handle_write_image_cache_state(int); it is the completion for the persist
// issued by write_image_cache_state().
template <typename I>
-void AbstractWriteLog<I>::handle_update_image_cache_state(int r) {
+void AbstractWriteLog<I>::handle_write_image_cache_state(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "r=" << r << dendl;
// NOTE(review): the lambda below captures on_finish, which is NOT a
// parameter of the function above — this part of the hunk belongs to a
// different enclosing function (shutdown/flush path); the diff has fused
// hunks without markers. Verify against the full file.
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
if (r >= 0) {
// Same pattern as elsewhere in the patch: refresh state under m_lock,
// persist (with the caller's on_finish) after dropping it.
- std::lock_guard locker(m_lock);
- update_image_cache_state(on_finish);
+ {
+ std::lock_guard locker(m_lock);
+ update_image_cache_state();
+ }
+ m_cache_state->write_image_cache_state(on_finish);
} else {
on_finish->complete(r);
}
// NOTE(review): another fused hunk (shutdown path — clears the log,
// removes the pool file): next_ctx now receives the persist completion
// instead of being threaded through update_image_cache_state().
Context *next_ctx = override_ctx(r, ctx);
periodic_stats();
- std::lock_guard locker(m_lock);
- check_image_cache_state_clean();
- m_wake_up_enabled = false;
- m_log_entries.clear();
- m_cache_state->clean = true;
- m_cache_state->empty = true;
- remove_pool_file();
- update_image_cache_state(next_ctx);
+ {
+ std::lock_guard locker(m_lock);
+ check_image_cache_state_clean();
+ m_wake_up_enabled = false;
+ m_log_entries.clear();
+ m_cache_state->clean = true;
+ m_cache_state->empty = true;
+ remove_pool_file();
+ update_image_cache_state();
+ }
+ m_cache_state->write_image_cache_state(next_ctx);
});
// NOTE(review): heavily elided hunk from a log-append completion path.
// A need_update_state flag is introduced so the persist can be deferred
// until after the (implied, elided) lock scope ends.
ctx = new LambdaContext(
[this, ctx](int r) {
{
GenericLogEntries dirty_entries;
int published_reserves = 0;
+ bool need_update_state = false;
ldout(m_image_ctx.cct, 20) << __func__ << ": completing" << dendl;
for (auto &op : ops) {
utime_t now = ceph_clock_now();
// Appending made the cache dirty: flip clean -> false under the
// (elided) lock, record that a persist is needed.
if (m_cache_state->clean && !this->m_dirty_log_entries.empty()) {
m_cache_state->clean = false;
update_image_cache_state();
+ need_update_state = true;
}
}
// NOTE(review): op->complete / perfcounter lines below are fragments of
// a different loop body — surrounding statements are elided by the diff.
op->complete(result);
log_entry->ram_entry.write_bytes);
m_perfcounter->tinc(l_librbd_pwl_log_op_app_to_cmp_t, now - op->log_append_start_time);
}
// Persist once, outside the lock scope closed above.
+ if (need_update_state) {
+ write_image_cache_state();
+ }
// NOTE(review): elided hunk from the dirty-entry writeback path. Same
// transformation: mark clean -> true under m_lock, set need_update_state,
// and call write_image_cache_state() only after the m_lock scope closes.
// New entries may be flushable
{
std::lock_guard locker(m_lock);
bool all_clean = false;
int flushed = 0;
bool has_write_entry = false;
+ bool need_update_state = false;
ldout(cct, 20) << "Look for dirty entries" << dendl;
{
if (!m_cache_state->clean && all_clean) {
m_cache_state->clean = true;
update_image_cache_state();
+ need_update_state = true;
}
break;
}
construct_flush_entries(entries_to_flush, post_unlock, has_write_entry);
}
// Outside the lock_guard scope above — safe point to persist.
+ if (need_update_state) {
+ write_image_cache_state();
+ }
// NOTE(review): elided hunk from the flush/drain path. need_update_state is
// declared before the lock scope, set inside it when clean flips to true,
// and consumed after the scope — keeping the persist off m_lock.
if (all_clean) {
/* All flushing complete, drain outside lock */
bool all_clean;
bool flushing;
bool stop_flushing;
+ bool need_update_state = false;
{
std::lock_guard locker(m_lock);
if (!m_cache_state->clean && all_clean && !flushing) {
m_cache_state->clean = true;
update_image_cache_state();
+ need_update_state = true;
}
stop_flushing = (m_shutting_down);
}
+ if (need_update_state) {
+ write_image_cache_state();
+ }
if (!flushing && (all_clean || stop_flushing)) {
/* Complete without holding m_lock */
// NOTE(review): header hunk — declaration changes matching the rename/split:
// the Context*-taking update_image_cache_state and its old handler are
// removed; write_image_cache_state(void) and handle_write_image_cache_state
// are added alongside the retained no-arg update_image_cache_state(void).
void arm_periodic_stats();
void pwl_init(Context *on_finish, pwl::DeferredContexts &later);
- void update_image_cache_state(Context *on_finish);
- void handle_update_image_cache_state(int r);
void check_image_cache_state_clean();
void flush_dirty_entries(Context *on_finish);
// NOTE(review): "return 0; }" below is a fragment fused from a different
// (elided) inline function — not part of the declaration list.
return 0;
}
void update_image_cache_state(void);
+ void write_image_cache_state(void);
+ void handle_write_image_cache_state(int r);
};
} // namespace pwl
// NOTE(review): hunk from the pmem (libpmemobj) WriteLog's
// alloc_op_log_entries. The allocation loop and the empty->false transition
// move into an explicit scoped block so m_lock is released before the new
// write_image_cache_state() call; need_update_state carries the decision
// across the scope boundary. m_log_append_lock is held throughout (assert).
TOID(struct WriteLogPoolRoot) pool_root;
pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
struct WriteLogCacheEntry *pmem_log_entries = D_RW(D_RW(pool_root)->log_entries);
+ bool need_update_state = false;
ceph_assert(ceph_mutex_is_locked_by_me(this->m_log_append_lock));
- /* Allocate the (already reserved) log entries */
- std::lock_guard locker(m_lock);
+ {
+ /* Allocate the (already reserved) log entries */
+ std::lock_guard locker(m_lock);
- for (auto &operation : ops) {
- uint32_t entry_index = this->m_first_free_entry;
- this->m_first_free_entry = (this->m_first_free_entry + 1) % this->m_total_log_entries;
- auto &log_entry = operation->get_log_entry();
- log_entry->log_entry_index = entry_index;
- log_entry->ram_entry.entry_index = entry_index;
- log_entry->cache_entry = &pmem_log_entries[entry_index];
- log_entry->ram_entry.set_entry_valid(true);
- m_log_entries.push_back(log_entry);
- ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
// Loop body is unchanged except for re-indentation into the new scope:
// claim the next free slot (circular), bind the pmem cache entry, mark
// valid, and track the entry in m_log_entries.
+ for (auto &operation : ops) {
+ uint32_t entry_index = this->m_first_free_entry;
+ this->m_first_free_entry = (this->m_first_free_entry + 1) % this->m_total_log_entries;
+ auto &log_entry = operation->get_log_entry();
+ log_entry->log_entry_index = entry_index;
+ log_entry->ram_entry.entry_index = entry_index;
+ log_entry->cache_entry = &pmem_log_entries[entry_index];
+ log_entry->ram_entry.set_entry_valid(true);
+ m_log_entries.push_back(log_entry);
+ ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
+ }
+ if (m_cache_state->empty && !m_log_entries.empty()) {
+ m_cache_state->empty = false;
+ this->update_image_cache_state();
+ need_update_state = true;
+ }
}
- if (m_cache_state->empty && !m_log_entries.empty()) {
- m_cache_state->empty = false;
- this->update_image_cache_state();
// Persist after m_lock is released (old code updated state here, outside
// the lock, without holding m_lock — which the new assert-bearing
// update_image_cache_state() would reject).
+ if (need_update_state) {
+ this->write_image_cache_state();
}
}
// NOTE(review): elided hunk from the pmem retire path. empty->true under
// m_lock sets need_update_state; the persist is intended to run after the
// lock scope.
m_perfcounter->hinc(l_librbd_pwl_retire_tx_t_hist, utime_t(tx_end - tx_start).to_nsec(),
retiring_entries.size());
+ bool need_update_state = false;
/* Update runtime copy of first_valid, and free entries counts */
{
std::lock_guard locker(m_lock);
if (!m_cache_state->empty && m_log_entries.empty()) {
m_cache_state->empty = true;
this->update_image_cache_state();
+ need_update_state = true;
}
for (auto &entry: retiring_entries) {
if (entry->write_bytes()) {
this->m_alloc_failed_since_retire = false;
this->wake_up();
}
// NOTE(review): brace nesting is ambiguous in this elided fragment —
// confirm in the full file that this write_image_cache_state() call lands
// AFTER the std::lock_guard scope closes, not inside it; running it under
// m_lock would defeat the purpose of the split.
+ if (need_update_state) {
+ this->write_image_cache_state();
+ }
} else {
ldout(cct, 20) << "Nothing to retire" << dendl;
return false;
// NOTE(review): complete hunk — the SSD WriteLog's alloc_op_log_entries.
// Old code held m_lock for the whole function and called the persisting
// update_image_cache_state() at the end. New code scopes the lock around
// the allocation loop and the empty->false transition, then persists via
// write_image_cache_state() after the lock is released, guarded by
// need_update_state.
template <typename I>
void WriteLog<I>::alloc_op_log_entries(GenericLogOperations &ops) {
- std::lock_guard locker(m_lock);
-
- for (auto &operation : ops) {
- auto &log_entry = operation->get_log_entry();
- log_entry->ram_entry.set_entry_valid(true);
- m_log_entries.push_back(log_entry);
- ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
+ bool need_update_state = false;
+ {
+ std::lock_guard locker(m_lock);
// Loop body unchanged: mark each reserved entry valid and track it.
+ for (auto &operation : ops) {
+ auto &log_entry = operation->get_log_entry();
+ log_entry->ram_entry.set_entry_valid(true);
+ m_log_entries.push_back(log_entry);
+ ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
+ }
+ if (m_cache_state->empty && !m_log_entries.empty()) {
+ m_cache_state->empty = false;
+ this->update_image_cache_state();
+ need_update_state = true;
+ }
}
- if (m_cache_state->empty && !m_log_entries.empty()) {
- m_cache_state->empty = false;
- this->update_image_cache_state();
+ if (need_update_state) {
+ this->write_image_cache_state();
}
}
// NOTE(review): elided hunk from the SSD retire path. Identical pattern:
// empty->true recorded under m_lock, persisted once after the lock scope,
// before dispatching deferred writes / further writeback.
allocated_bytes += entry->get_aligned_data_size();
}
}
+ bool need_update_state = false;
{
std::lock_guard locker(m_lock);
m_first_valid_entry = first_valid_entry;
if (!m_cache_state->empty && m_log_entries.empty()) {
m_cache_state->empty = true;
this->update_image_cache_state();
+ need_update_state = true;
}
// NOTE(review): ldout / wake_up lines below are fragments — intervening
// statements are elided by the diff.
ldout(m_image_ctx.cct, 20)
this->m_alloc_failed_since_retire = false;
this->wake_up();
}
+ if (need_update_state) {
+ this->write_image_cache_state();
+ }
this->dispatch_deferred_writes();
this->process_writeback_dirty_entries();