Currently, the cache loads existing entries after a restart and calculates
m_bytes_allocated based on those entries. However, there are currently
the following problems:
1: The allocated bytes of write-same entries are not calculated for the rwl & ssd cache.
2: For the ssd cache, the calculation does not include the size of the
log entry itself and does not consider data alignment. This causes the
total to be under-counted, leading to over-allocation later, which can
overwrite data that has not yet been flushed to the OSD and cause data loss.
The calculation methods of ssd and rwl are different, so add a new API,
allocated_and_cached_data(), so each cache can implement its own method.
For the SSD cache, we directly use m_first_valid_entry & m_first_free_entry
to calculate m_bytes_allocated.
trivial fix: new code from PR, nothing unrelated: https://www.diffchecker.com/S1eXatpM
Fixes: https://tracker.ceph.com/issues/52341
Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
(cherry picked from commit a96ca93d69d5c1f302f3141082302d4699915397)
template <typename I>
void AbstractWriteLog<I>::update_sync_points(std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> &sync_point_entries,
- DeferredContexts &later, uint32_t alloc_size ) {
+ DeferredContexts &later) {
/* Create missing sync points. These must not be appended until the
* entry reload is complete and the write map is up to
* date. Currently this is handled by the deferred contexts object
gen_write_entry->set_flushed(true);
sync_point_entry->writes_flushed++;
}
- if (log_entry->write_bytes() == log_entry->bytes_dirty()) {
- /* This entry is a basic write */
- uint64_t bytes_allocated = alloc_size;
- if (gen_write_entry->ram_entry.write_bytes > bytes_allocated) {
- bytes_allocated = gen_write_entry->ram_entry.write_bytes;
- }
- m_bytes_allocated += bytes_allocated;
- m_bytes_cached += gen_write_entry->ram_entry.write_bytes;
- }
+
+ /* calc m_bytes_allocated & m_bytes_cached */
+ inc_allocated_cached_bytes(log_entry);
}
}
} else {
std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t,
std::shared_ptr<pwl::SyncPointLogEntry>> &sync_point_entries,
- pwl::DeferredContexts &later, uint32_t alloc_size);
+ pwl::DeferredContexts &later);
+ virtual void inc_allocated_cached_bytes(
+ std::shared_ptr<pwl::GenericLogEntry> log_entry) = 0;
Context *construct_flush_entry(
const std::shared_ptr<pwl::GenericLogEntry> log_entry, bool invalidating);
void process_writeback_dirty_entries();
entry_index = (entry_index + 1) % this->m_total_log_entries;
}
- this->update_sync_points(missing_sync_points, sync_point_entries, later, MIN_WRITE_ALLOC_SIZE);
+ this->update_sync_points(missing_sync_points, sync_point_entries, later);
+}
+
+// Account for one reloaded log entry in the pmem (rwl) cache's space
+// accounting, updating m_bytes_allocated and m_bytes_cached.
+// Called per entry while replaying existing entries after a restart.
+template <typename I>
+void WriteLog<I>::inc_allocated_cached_bytes(
+  std::shared_ptr<pwl::GenericLogEntry> log_entry) {
+  if (log_entry->is_write_entry()) {
+    // Every write occupies at least MIN_WRITE_ALLOC_SIZE of the data
+    // buffer; larger payloads occupy their full write_bytes().
+    this->m_bytes_allocated += std::max(log_entry->write_bytes(), MIN_WRITE_ALLOC_SIZE);
+    this->m_bytes_cached += log_entry->write_bytes();
+  }
}
template <typename I>
void flush_op_log_entries(pwl::GenericLogOperationsVector &ops);
template <typename V>
void flush_pmem_buffer(V& ops);
-
+ void inc_allocated_cached_bytes(
+ std::shared_ptr<pwl::GenericLogEntry> log_entry) override;
protected:
using AbstractWriteLog<ImageCtxT>::m_lock;
using AbstractWriteLog<ImageCtxT>::m_log_entries;
if (next_log_pos >= this->m_log_pool_size) {
next_log_pos = next_log_pos % this->m_log_pool_size + DATA_RING_BUFFER_OFFSET;
}
- }
- this->update_sync_points(missing_sync_points, sync_point_entries, later,
- MIN_WRITE_ALLOC_SSD_SIZE);
+ }
+ this->update_sync_points(missing_sync_points, sync_point_entries, later);
+ if (m_first_valid_entry > m_first_free_entry) {
+ m_bytes_allocated = this->m_log_pool_size - m_first_valid_entry +
+ m_first_free_entry - DATA_RING_BUFFER_OFFSET;
+ } else {
+ m_bytes_allocated = m_first_free_entry - m_first_valid_entry;
+ }
+}
+
+// SSD variant: only m_bytes_cached is accumulated per entry here.
+// m_bytes_allocated for SSD is instead derived from m_first_valid_entry
+// and m_first_free_entry after the reload loop finishes (see
+// load_existing_entries), which accounts for entry headers and alignment.
+template <typename I>
+void WriteLog<I>::inc_allocated_cached_bytes(
+  std::shared_ptr<pwl::GenericLogEntry> log_entry) {
+  if (log_entry->is_write_entry()) {
+    this->m_bytes_cached += log_entry->write_bytes();
+  }
}
template <typename I>
Builder<This>* create_builder();
int create_and_open_bdev();
void load_existing_entries(pwl::DeferredContexts &later);
+ void inc_allocated_cached_bytes(
+ std::shared_ptr<pwl::GenericLogEntry> log_entry) override;
void collect_read_extents(
uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,