template <typename I>
void AbstractWriteLog<I>::periodic_stats() {
- {
- std::lock_guard locker(m_lock);
- update_image_cache_state();
- }
- write_image_cache_state();
+ std::unique_lock locker(m_lock);
ldout(m_image_ctx.cct, 5) << "STATS: m_log_entries=" << m_log_entries.size()
<< ", m_dirty_log_entries=" << m_dirty_log_entries.size()
<< ", m_free_log_entries=" << m_free_log_entries
<< ", m_current_sync_gen=" << m_current_sync_gen
<< ", m_flushed_sync_gen=" << m_flushed_sync_gen
<< dendl;
+
+ update_image_cache_state();
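+ // write_image_cache_state() releases m_lock (via the passed locker) before dispatching the metadata update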
+ write_image_cache_state(locker);
}
template <typename I>
-void AbstractWriteLog<I>::write_image_cache_state() {
+void AbstractWriteLog<I>::write_image_cache_state(std::unique_lock<ceph::mutex>& locker) {
using klass = AbstractWriteLog<I>;
Context *ctx = util::create_context_callback<
klass, &klass::handle_write_image_cache_state>(this);
- m_cache_state->write_image_cache_state(ctx);
+ m_cache_state->write_image_cache_state(locker, ctx);
}
template <typename I>
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
if (r >= 0) {
- {
- std::lock_guard locker(m_lock);
- update_image_cache_state();
- }
- m_cache_state->write_image_cache_state(on_finish);
+ std::unique_lock locker(m_lock);
+ update_image_cache_state();
+ m_cache_state->write_image_cache_state(locker, on_finish);
} else {
on_finish->complete(r);
}
Context *next_ctx = override_ctx(r, ctx);
periodic_stats();
- {
- std::lock_guard locker(m_lock);
- check_image_cache_state_clean();
- m_wake_up_enabled = false;
- m_log_entries.clear();
- m_cache_state->clean = true;
- m_cache_state->empty = true;
- remove_pool_file();
- update_image_cache_state();
- }
- m_cache_state->write_image_cache_state(next_ctx);
+ std::unique_lock locker(m_lock);
+ check_image_cache_state_clean();
+ m_wake_up_enabled = false;
+ m_log_entries.clear();
+ m_cache_state->clean = true;
+ m_cache_state->empty = true;
+ remove_pool_file();
+ update_image_cache_state();
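+ // persist the final clean/empty state; m_lock is released inside write_image_cache_state()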
+ m_cache_state->write_image_cache_state(locker, next_ctx);
});
ctx = new LambdaContext(
[this, ctx](int r) {
m_perfcounter->tinc(l_librbd_pwl_log_op_app_to_cmp_t, now - op->log_append_start_time);
}
if (need_update_state) {
- write_image_cache_state();
+ std::unique_lock locker(m_lock);
+ write_image_cache_state(locker);
}
// New entries may be flushable
{
construct_flush_entries(entries_to_flush, post_unlock, has_write_entry);
}
if (need_update_state) {
- write_image_cache_state();
+ std::unique_lock locker(m_lock);
+ write_image_cache_state(locker);
}
if (all_clean) {
bool all_clean;
bool flushing;
bool stop_flushing;
- bool need_update_state = false;
{
- std::lock_guard locker(m_lock);
+ std::unique_lock locker(m_lock);
flushing = (0 != m_flush_ops_in_flight);
all_clean = m_dirty_log_entries.empty();
+ stop_flushing = (m_shutting_down);
if (!m_cache_state->clean && all_clean && !flushing) {
m_cache_state->clean = true;
update_image_cache_state();
- need_update_state = true;
+ write_image_cache_state(locker);
}
- stop_flushing = (m_shutting_down);
- }
- if (need_update_state) {
- write_image_cache_state();
}
if (!flushing && (all_clean || stop_flushing)) {
return 0;
}
void update_image_cache_state(void);
- void write_image_cache_state(void);
+ void write_image_cache_state(std::unique_lock<ceph::mutex>& locker);
void handle_write_image_cache_state(int r);
};
}
template <typename I>
-void ImageCacheState<I>::write_image_cache_state(Context *on_finish) {
+void ImageCacheState<I>::write_image_cache_state(std::unique_lock<ceph::mutex>& locker,
+ Context *on_finish) {
+ ceph_assert(ceph_mutex_is_locked_by_me(*locker.mutex()));
stats_timestamp = ceph_clock_now();
- std::shared_lock owner_lock{m_image_ctx->owner_lock};
json_spirit::mObject o;
o["present"] = present;
o["empty"] = empty;
o["hit_bytes"] = hit_bytes;
o["miss_bytes"] = miss_bytes;
std::string image_state_json = json_spirit::write(o);
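+ // the state JSON is built under the caller's m_lock; release it before taking owner_lock and dispatching the metadata update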
+ locker.unlock();
+ std::shared_lock owner_lock{m_image_ctx->owner_lock};
ldout(m_image_ctx->cct, 20) << __func__ << " Store state: "
<< image_state_json << dendl;
m_plugin_api.execute_image_metadata_set(m_image_ctx, PERSISTENT_CACHE_STATE,
void init_from_config();
bool init_from_metadata(json_spirit::mValue& json_root);
- void write_image_cache_state(Context *on_finish);
+ void write_image_cache_state(std::unique_lock<ceph::mutex>& locker,
+ Context *on_finish);
void clear_image_cache_state(Context *on_finish);
TOID(struct WriteLogPoolRoot) pool_root;
pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
struct WriteLogCacheEntry *pmem_log_entries = D_RW(D_RW(pool_root)->log_entries);
- bool need_update_state = false;
ceph_assert(ceph_mutex_is_locked_by_me(this->m_log_append_lock));
- {
- /* Allocate the (already reserved) log entries */
- std::lock_guard locker(m_lock);
+ /* Allocate the (already reserved) log entries */
+ std::unique_lock locker(m_lock);
- for (auto &operation : ops) {
- uint32_t entry_index = this->m_first_free_entry;
- this->m_first_free_entry = (this->m_first_free_entry + 1) % this->m_total_log_entries;
- auto &log_entry = operation->get_log_entry();
- log_entry->log_entry_index = entry_index;
- log_entry->ram_entry.entry_index = entry_index;
- log_entry->cache_entry = &pmem_log_entries[entry_index];
- log_entry->ram_entry.set_entry_valid(true);
- m_log_entries.push_back(log_entry);
- ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
- }
- if (m_cache_state->empty && !m_log_entries.empty()) {
- m_cache_state->empty = false;
- this->update_image_cache_state();
- need_update_state = true;
- }
+ for (auto &operation : ops) {
+ uint32_t entry_index = this->m_first_free_entry;
+ this->m_first_free_entry = (this->m_first_free_entry + 1) % this->m_total_log_entries;
+ auto &log_entry = operation->get_log_entry();
+ log_entry->log_entry_index = entry_index;
+ log_entry->ram_entry.entry_index = entry_index;
+ log_entry->cache_entry = &pmem_log_entries[entry_index];
+ log_entry->ram_entry.set_entry_valid(true);
+ m_log_entries.push_back(log_entry);
+ ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
}
- if (need_update_state) {
- this->write_image_cache_state();
+ if (m_cache_state->empty && !m_log_entries.empty()) {
+ m_cache_state->empty = false;
+ this->update_image_cache_state();
+ this->write_image_cache_state(locker);
}
}
this->wake_up();
}
if (need_update_state) {
- this->write_image_cache_state();
+ std::unique_lock locker(m_lock);
+ this->write_image_cache_state(locker);
}
} else {
ldout(cct, 20) << "Nothing to retire" << dendl;
template <typename I>
void WriteLog<I>::alloc_op_log_entries(GenericLogOperations &ops) {
- bool need_update_state = false;
- {
- std::lock_guard locker(m_lock);
- for (auto &operation : ops) {
- auto &log_entry = operation->get_log_entry();
- log_entry->ram_entry.set_entry_valid(true);
- m_log_entries.push_back(log_entry);
- ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
- }
- if (m_cache_state->empty && !m_log_entries.empty()) {
- m_cache_state->empty = false;
- this->update_image_cache_state();
- need_update_state = true;
- }
+ std::unique_lock locker(m_lock);
+
+ for (auto &operation : ops) {
+ auto &log_entry = operation->get_log_entry();
+ log_entry->ram_entry.set_entry_valid(true);
+ m_log_entries.push_back(log_entry);
+ ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
}
- if (need_update_state) {
- this->write_image_cache_state();
+ if (m_cache_state->empty && !m_log_entries.empty()) {
+ m_cache_state->empty = false;
+ this->update_image_cache_state();
+ this->write_image_cache_state(locker);
}
}
this->wake_up();
}
if (need_update_state) {
- this->write_image_cache_state();
+ std::unique_lock locker(m_lock);
+ this->write_image_cache_state(locker);
}
this->dispatch_deferred_writes();
image_cache_state.empty = false;
image_cache_state.clean = false;
+ ceph::mutex lock = ceph::make_mutex("MockImageCacheStateRWL lock");
MockContextRWL finish_ctx;
expect_metadata_set(mock_image_ctx);
expect_context_complete(finish_ctx, 0);
- image_cache_state.write_image_cache_state(&finish_ctx);
+ std::unique_lock locker(lock);
+ image_cache_state.write_image_cache_state(locker, &finish_ctx);
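+ // write_image_cache_state() is expected to release the lock it was handed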
+ ASSERT_FALSE(locker.owns_lock());
ASSERT_EQ(0, finish_ctx.wait());
}
image_cache_state.empty = false;
image_cache_state.clean = false;
+ ceph::mutex lock = ceph::make_mutex("MockImageCacheStateSSD lock");
MockContextSSD finish_ctx;
expect_metadata_set(mock_image_ctx);
expect_context_complete(finish_ctx, 0);
- image_cache_state.write_image_cache_state(&finish_ctx);
+ std::unique_lock locker(lock);
+ image_cache_state.write_image_cache_state(locker, &finish_ctx);
+ ASSERT_FALSE(locker.owns_lock());
ASSERT_EQ(0, finish_ctx.wait());
}