bool AbstractWriteLog<I>::check_allocation(C_BlockIORequestT *req,
    uint64_t &bytes_cached, uint64_t &bytes_dirtied, uint64_t &bytes_allocated,
    uint64_t &num_lanes, uint64_t &num_log_entries,
-   uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap){
+   uint64_t &num_unpublished_reserves) {
  bool alloc_succeeds = true;
  bool no_space = false;
  {
      no_space = true; /* Entries must be retired */
    }
    /* Don't attempt buffer allocate if we've exceeded the "full" threshold */
-   if (m_bytes_allocated + bytes_allocated > bytes_allocated_cap) {
+   if (m_bytes_allocated + bytes_allocated > m_bytes_allocated_cap) {
      if (!req->has_io_waited_for_buffers()) {
        req->set_io_waited_for_buffers(true);
        ldout(m_image_ctx.cct, 1) << "Waiting for allocation cap (cap="
-                                 << bytes_allocated_cap
+                                 << m_bytes_allocated_cap
                                  << ", allocated=" << m_bytes_allocated
                                  << ") in write [" << *req << "]" << dendl;
      }
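
This hunk drops the cap parameter inside check_allocation() and reads the base-class member m_bytes_allocated_cap instead; the has_io_waited_for_buffers() flag ensures the "Waiting for allocation cap" message is logged once per request rather than on every retry. A minimal, self-contained sketch of that gating pattern (the Request type and function name here are hypothetical stand-ins, not Ceph API):

#include <cstdint>
#include <iostream>

struct Request {
  bool waited_for_buffers = false;  // stands in for has_io_waited_for_buffers()
};

// Returns false when the reservation would exceed the cap; the caller is
// expected to retry after older log entries are retired.
bool within_allocation_cap(Request &req, uint64_t allocated,
                           uint64_t want, uint64_t cap) {
  if (allocated + want > cap) {
    if (!req.waited_for_buffers) {
      req.waited_for_buffers = true;  // log only on the first wait
      std::cout << "waiting for allocation cap (cap=" << cap
                << ", allocated=" << allocated << ")\n";
    }
    return false;
  }
  return true;
}

int main() {
  Request req;
  // First call exceeds the cap and logs; the second fails silently.
  within_allocation_cap(req, 900, 200, 1000);
  within_allocation_cap(req, 900, 200, 1000);
}
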
      uint64_t &bytes_cached, uint64_t &bytes_dirtied,
      uint64_t &bytes_allocated,
      uint64_t &num_lanes, uint64_t &num_log_entries,
-     uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap);
+     uint64_t &num_unpublished_reserves);
  void append_scheduled(
      pwl::GenericLogOperations &ops, bool &ops_remain, bool &appending,
      bool isRWL=false);
  req->setup_buffer_resources(&bytes_cached, &bytes_dirtied, &bytes_allocated,
                              &num_lanes, &num_log_entries, &num_unpublished_reserves);
- alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied, bytes_allocated,
-                                         num_lanes, num_log_entries, num_unpublished_reserves,
-                                         this->m_bytes_allocated_cap);
+ alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
+                                         bytes_allocated, num_lanes, num_log_entries,
+                                         num_unpublished_reserves);
  std::vector<WriteBufferAllocation>& buffers = req->get_resources_buffers();
  if (!alloc_succeeds) {
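
The call sites change accordingly: setup_buffer_resources() still computes the request's resource needs, but check_allocation() no longer takes a cap argument. A sketch of the parameter-to-member refactor this enables, with an illustrative class layout (only m_bytes_allocated and m_bytes_allocated_cap are names from the source):

#include <cstdint>

class WriteLogSketch {
protected:
  uint64_t m_bytes_allocated = 0;      // bytes currently reserved
  uint64_t m_bytes_allocated_cap = 0;  // set once during pool initialization

public:
  // Before: check_allocation(..., uint64_t bytes_allocated_cap), so every
  // caller had to pass the right cap. After: the member supplies it, and
  // callers cannot pass a stale or backend-mismatched value.
  bool check_allocation(uint64_t bytes_allocated) const {
    return m_bytes_allocated + bytes_allocated <= m_bytes_allocated_cap;
  }
};
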
    num_small_writes = MAX_LOG_ENTRIES;
  }
  assert(num_small_writes > 2);
- m_log_pool_ring_buffer_size = this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
+ /* Size of ring buffer */
+ this->m_bytes_allocated_cap =
+     this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
  /* Log ring empty */
  m_first_free_entry = DATA_RING_BUFFER_OFFSET;
  m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
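
The SSD backend now publishes its usable capacity directly into m_bytes_allocated_cap: everything in the pool past the metadata region at DATA_RING_BUFFER_OFFSET is data ring. A worked example of that arithmetic; the constant's value and the pool size below are made up for illustration, only the formula comes from the hunk:

#include <cstdint>
#include <iostream>

int main() {
  const uint64_t DATA_RING_BUFFER_OFFSET = 8192;      // hypothetical value
  const uint64_t log_pool_config_size = 1ULL << 30;   // e.g. a 1 GiB pool

  // Size of ring buffer: the data area excludes the metadata prefix.
  uint64_t bytes_allocated_cap =
      log_pool_config_size - DATA_RING_BUFFER_OFFSET;

  // Log ring empty: both cursors start at the beginning of the data area.
  uint64_t first_free_entry = DATA_RING_BUFFER_OFFSET;
  uint64_t first_valid_entry = DATA_RING_BUFFER_OFFSET;

  std::cout << "cap=" << bytes_allocated_cap
            << " first_free=" << first_free_entry
            << " first_valid=" << first_valid_entry << "\n";
}
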
  alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
                                          bytes_allocated, num_lanes,
                                          num_log_entries,
-                                         num_unpublished_reserves,
-                                         m_log_pool_ring_buffer_size);
+                                         num_unpublished_reserves);
  req->set_allocated(alloc_succeeds);
  return alloc_succeeds;
}
  CephContext *cct = m_image_ctx.cct;
  int max_iterations = 4;
  bool wake_up_requested = false;
- uint64_t aggressive_high_water_bytes = m_log_pool_ring_buffer_size * AGGRESSIVE_RETIRE_HIGH_WATER;
+ uint64_t aggressive_high_water_bytes =
+     this->m_bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;
  uint64_t aggressive_high_water_entries = this->m_total_log_entries * AGGRESSIVE_RETIRE_HIGH_WATER;
- uint64_t high_water_bytes = m_log_pool_ring_buffer_size * RETIRE_HIGH_WATER;
+ uint64_t high_water_bytes = this->m_bytes_allocated_cap * RETIRE_HIGH_WATER;
  uint64_t high_water_entries = this->m_total_log_entries * RETIRE_HIGH_WATER;
  ldout(cct, 20) << dendl;
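
retire_entries() keys its watermarks off the same cap: byte thresholds scale from m_bytes_allocated_cap, entry thresholds from m_total_log_entries, and crossing the aggressive mark makes retirement reclaim more per pass. A sketch of that threshold logic; the fractional constants and sample numbers are placeholders, not the values Ceph defines:

#include <cstdint>
#include <iostream>

int main() {
  const double RETIRE_HIGH_WATER = 0.5;              // placeholder fraction
  const double AGGRESSIVE_RETIRE_HIGH_WATER = 0.75;  // placeholder fraction

  uint64_t bytes_allocated_cap = 1000000000;  // sample cap
  uint64_t bytes_allocated = 800000000;       // sample current usage

  uint64_t high_water_bytes = bytes_allocated_cap * RETIRE_HIGH_WATER;
  uint64_t aggressive_high_water_bytes =
      bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;

  // Retire once over the high-water mark; retire harder past the
  // aggressive mark so the ring drains before writers stall on the cap.
  if (bytes_allocated > aggressive_high_water_bytes) {
    std::cout << "aggressive retire\n";
  } else if (bytes_allocated > high_water_bytes) {
    std::cout << "normal retire\n";
  }
}
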
  WriteLogPoolRootUpdateList m_poolroot_to_update; /* pool root list to update to SSD */
  bool m_updating_pool_root = false;
- uint64_t m_log_pool_ring_buffer_size; /* Size of ring buffer */
  std::atomic<int> m_async_update_superblock = {0};
  BlockDevice *bdev = nullptr;
  uint64_t pool_size;
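
With the cap stored in the shared base-class member, the SSD header can drop its private m_log_pool_ring_buffer_size outright, leaving one source of truth for both the allocation gate and the retire watermarks. A sketch of the resulting layering, with illustrative class names:

#include <cstdint>

class AbstractWriteLogSketch {
protected:
  uint64_t m_bytes_allocated_cap = 0;  // single source of truth for the cap
};

class SsdWriteLogSketch : public AbstractWriteLogSketch {
public:
  // Replaces the removed m_log_pool_ring_buffer_size member: the subclass
  // writes the shared member instead of keeping a parallel copy.
  void initialize_pool(uint64_t pool_size, uint64_t data_offset) {
    m_bytes_allocated_cap = pool_size - data_offset;
  }
};

int main() {
  SsdWriteLogSketch log;
  log.initialize_pool(1ULL << 30, 8192);  // sample numbers
}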