/* We need one free log entry per extent (each is a separate entry), and
* one free "lane" for remote replication. */
if ((m_free_lanes >= num_lanes) &&
- (m_free_log_entries >= num_log_entries)) {
+ (m_free_log_entries >= num_log_entries) &&
+ (m_bytes_allocated_cap >= m_bytes_allocated + bytes_allocated)) {
m_free_lanes -= num_lanes;
m_free_log_entries -= num_log_entries;
m_unpublished_reserves += num_unpublished_reserves;
if (req->has_io_waited_for_buffers()) {
req->set_io_waited_for_buffers(false);
}
-
} else {
alloc_succeeds = false;
}
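/* Aside, not part of the patch: a minimal sketch of the guard this hunk
 * extends, using hypothetical free-standing names rather than the Ceph API.
 * All three resources must fit before any counter is decremented, so a
 * failed reservation leaves no partial state to roll back. */
static bool can_reserve(uint64_t free_lanes, uint64_t num_lanes,
                        uint64_t free_log_entries, uint64_t num_log_entries,
                        uint64_t bytes_allocated, uint64_t bytes_requested,
                        uint64_t bytes_allocated_cap) {
  return free_lanes >= num_lanes &&
         free_log_entries >= num_log_entries &&
         bytes_allocated_cap >= bytes_allocated + bytes_requested;
}
/* e.g. with a cap of 14 units, 13 units already allocated, and a 2-unit
 * request, can_reserve() is false even when lanes and log entries are
 * plentiful, so the request waits until entries are retired. */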
m_cache_state->empty = true;
/* new pool, calculate and store metadata */
- /* Size of ring buffer */
- this->m_bytes_allocated_cap =
- this->m_log_pool_size - DATA_RING_BUFFER_OFFSET;
+ /* Leave at least MIN_WRITE_ALLOC_SSD_SIZE bytes of the ring buffer
+  * unallocated. That way, when the ring buffer is fully allocated,
+  * m_first_free_entry and m_first_valid_entry can never be equal;
+  * equality always means the cache is empty. */
+ this->m_bytes_allocated_cap = this->m_log_pool_size -
+ DATA_RING_BUFFER_OFFSET - MIN_WRITE_ALLOC_SSD_SIZE;
/* Log ring empty */
m_first_free_entry = DATA_RING_BUFFER_OFFSET;
m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
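/* Aside, not part of the patch: a self-contained sketch (hypothetical
 * constants and names, not the Ceph API) of why one MIN_WRITE_ALLOC_SSD_SIZE
 * unit is held back. If every byte could be allocated, a full ring and an
 * empty ring would both satisfy first_free == first_valid; reserving one
 * unit makes equality mean "empty" unambiguously. */
#include <cassert>
#include <cstdint>

constexpr uint64_t kUnit = 4096;           /* stand-in for MIN_WRITE_ALLOC_SSD_SIZE */
constexpr uint64_t kOffset = kUnit;        /* stand-in for DATA_RING_BUFFER_OFFSET */
constexpr uint64_t kPoolSize = 16 * kUnit; /* toy pool size */

struct Ring {
  /* Cap mirrors the patch: pool size minus header minus one reserved unit. */
  static constexpr uint64_t cap = kPoolSize - kOffset - kUnit;
  uint64_t first_free = kOffset;   /* next byte to hand out */
  uint64_t first_valid = kOffset;  /* oldest byte still in use */
  uint64_t allocated = 0;

  bool empty() const { return first_free == first_valid; }

  bool alloc(uint64_t n) {
    if (allocated + n > cap) {
      return false;  /* refusing here keeps first_free off first_valid */
    }
    allocated += n;
    first_free = kOffset + (first_free - kOffset + n) % (kPoolSize - kOffset);
    return true;
  }
};

int main() {
  Ring r;
  assert(r.empty());
  while (r.alloc(kUnit)) {
  }                   /* fill to the cap */
  assert(!r.empty()); /* full ring: pointers still differ by one unit */
  return 0;
}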
this->m_first_valid_entry = pool_root.first_valid_entry;
this->m_first_free_entry = pool_root.first_free_entry;
- this->m_bytes_allocated_cap =
-   this->m_log_pool_size - DATA_RING_BUFFER_OFFSET;
+ /* Recompute the cap the same way when opening an existing pool so that
+  * the first_free_entry / first_valid_entry invariant holds across
+  * restarts. */
+ this->m_bytes_allocated_cap = this->m_log_pool_size -
+   DATA_RING_BUFFER_OFFSET - MIN_WRITE_ALLOC_SSD_SIZE;
std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> sync_point_entries;