"tp_pwl", 4, ""),
m_cache_state(cache_state),
m_image_ctx(image_ctx),
- m_log_pool_config_size(DEFAULT_POOL_SIZE),
+ m_log_pool_size(DEFAULT_POOL_SIZE),
m_image_writeback(image_writeback),
m_plugin_api(plugin_api),
m_log_retire_lock(ceph::make_mutex(pwl::unique_lock_name(
ldout(cct, 5) << "pwl_path: " << m_cache_state->path << dendl;
m_log_pool_name = m_cache_state->path;
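// Use the configured cache size, but never go below MIN_POOL_SIZE.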
- m_log_pool_config_size = max(m_cache_state->size, MIN_POOL_SIZE);
+ m_log_pool_size = max(m_cache_state->size, MIN_POOL_SIZE);
if ((!m_cache_state->present) &&
(access(m_log_pool_name.c_str(), F_OK) == 0)) {
ImageCtxT &m_image_ctx;
std::string m_log_pool_name;
- uint64_t m_log_pool_config_size; /* Configured size of RWL */
+ uint64_t m_log_pool_size; /* Size of the log pool */
uint32_t m_total_log_entries = 0;
uint32_t m_free_log_entries = 0;
if ((m_log_pool =
pmemobj_create(this->m_log_pool_name.c_str(),
this->m_pwl_pool_layout_name,
- this->m_log_pool_config_size,
+ this->m_log_pool_size,
(S_IWUSR | S_IRUSR))) == NULL) {
lderr(cct) << "failed to create pool (" << this->m_log_pool_name << ")"
<< pmemobj_errormsg() << dendl;
pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
/* new pool, calculate and store metadata */
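/* Estimate how many log entries the pool can hold. Only a fraction of the
 * pool (USABLE_SIZE, presumably leaving headroom for the pool root and
 * allocator overhead) is divided by the worst-case footprint of a small
 * write: one MIN_WRITE_ALLOC_SIZE data block, its per-allocation overhead,
 * and the WriteLogCacheEntry that describes it. */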
- size_t effective_pool_size = (size_t)(this->m_log_pool_config_size * USABLE_SIZE);
+ size_t effective_pool_size = (size_t)(this->m_log_pool_size * USABLE_SIZE);
size_t small_write_size = MIN_WRITE_ALLOC_SIZE + BLOCK_ALLOC_OVERHEAD_BYTES + sizeof(struct WriteLogCacheEntry);
uint64_t num_small_writes = (uint64_t)(effective_pool_size / small_write_size);
if (num_small_writes > MAX_LOG_ENTRIES) {
D_RW(pool_root)->log_entries =
TX_ZALLOC(struct WriteLogCacheEntry,
sizeof(struct WriteLogCacheEntry) * num_small_writes);
- D_RW(pool_root)->pool_size = this->m_log_pool_config_size;
+ D_RW(pool_root)->pool_size = this->m_log_pool_size;
D_RW(pool_root)->flushed_sync_gen = this->m_flushed_sync_gen;
D_RW(pool_root)->block_size = MIN_WRITE_ALLOC_SIZE;
D_RW(pool_root)->num_log_entries = num_small_writes;
on_finish->complete(-EINVAL);
return false;
}
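// Opening an existing pool: adopt the geometry recorded in the pool root
// rather than recomputing it from the configured size.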
- this->m_log_pool_config_size = D_RO(pool_root)->pool_size;
+ this->m_log_pool_size = D_RO(pool_root)->pool_size;
this->m_flushed_sync_gen = D_RO(pool_root)->flushed_sync_gen;
this->m_total_log_entries = D_RO(pool_root)->num_log_entries;
m_first_free_entry = D_RO(pool_root)->first_free_entry;
* entries, and n-1 free log entries */
this->m_free_log_entries = this->m_total_log_entries - (m_first_free_entry - m_first_valid_entry) - 1;
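// e.g. an empty log (m_first_free_entry == m_first_valid_entry) with n total
// entries reports n-1 free; one slot presumably stays unused so that
// "empty" and "full" remain distinguishable.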
}
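// Cap the bytes that may be allocated for log data at the usable portion of the pool.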
- size_t effective_pool_size = (size_t)(this->m_log_pool_config_size * USABLE_SIZE);
+ size_t effective_pool_size = (size_t)(this->m_log_pool_size * USABLE_SIZE);
this->m_bytes_allocated_cap = effective_pool_size;
load_existing_entries(later);
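// The cache is clean on open only if replay found no dirty (not yet
// written-back) log entries.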
m_cache_state->clean = this->m_dirty_log_entries.empty();
bool succeed = true;
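// The SSD cache pool is created and sized by truncating the file at
// m_log_pool_name, unlike the pmem-backed cache above which uses
// pmemobj_create().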
if (fd >= 0) {
if (truncate(this->m_log_pool_name.c_str(),
- this->m_log_pool_config_size) != 0) {
+ this->m_log_pool_size) != 0) {
succeed = false;
}
::close(fd);
/* new pool, calculate and store metadata */
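// Unlike the pmem path, no USABLE_SIZE scaling is applied here: the entry
// count is the pool size divided by the per-entry footprint (one
// MIN_WRITE_ALLOC_SSD_SIZE unit plus its WriteLogCacheEntry).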
size_t small_write_size = MIN_WRITE_ALLOC_SSD_SIZE + sizeof(struct WriteLogCacheEntry);
- uint64_t num_small_writes = (uint64_t)(this->m_log_pool_config_size / small_write_size);
+ uint64_t num_small_writes = (uint64_t)(this->m_log_pool_size / small_write_size);
if (num_small_writes > MAX_LOG_ENTRIES) {
num_small_writes = MAX_LOG_ENTRIES;
}
ceph_assert(num_small_writes > 2);
/* Size of ring buffer */
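// DATA_RING_BUFFER_OFFSET presumably reserves the head of the pool for the
// superblock/root; only the space above it belongs to the data ring, so it
// is excluded from the allocation cap here.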
this->m_bytes_allocated_cap =
- this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
+ this->m_log_pool_size - DATA_RING_BUFFER_OFFSET;
/* Log ring empty */
m_first_free_entry = DATA_RING_BUFFER_OFFSET;
m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
auto new_root = std::make_shared<WriteLogPoolRoot>(pool_root);
- new_root->pool_size = this->m_log_pool_config_size;
+ new_root->pool_size = this->m_log_pool_size;
new_root->flushed_sync_gen = this->m_flushed_sync_gen;
new_root->block_size = MIN_WRITE_ALLOC_SSD_SIZE;
new_root->first_free_entry = m_first_free_entry;
m_first_valid_entry = next_log_pos;
this->m_total_log_entries = current_pool_root.num_log_entries;
this->m_flushed_sync_gen = current_pool_root.flushed_sync_gen;
- this->m_log_pool_config_size = current_pool_root.pool_size;
+ this->m_log_pool_size = current_pool_root.pool_size;
std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> sync_point_entries;
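// Sync point entries found during replay are collected here and handed to
// update_sync_points() below so that gaps (missing_sync_points) can be
// filled in afterwards.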
}
// along with the write_bytes, add control block size too
next_log_pos += MIN_WRITE_ALLOC_SSD_SIZE;
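// Positions advance through the data ring and wrap back past the reserved
// header area (DATA_RING_BUFFER_OFFSET) once they reach the end of the pool.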
- if (next_log_pos >= this->m_log_pool_config_size) {
- next_log_pos = next_log_pos % this->m_log_pool_config_size + DATA_RING_BUFFER_OFFSET;
+ if (next_log_pos >= this->m_log_pool_size) {
+ next_log_pos = next_log_pos % this->m_log_pool_size + DATA_RING_BUFFER_OFFSET;
}
}
this->update_sync_points(missing_sync_points, sync_point_entries, later,
ldout(cct, 20) << "The log entry is " << *(*it) << dendl;
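// The next entry should start right after this entry's control block and
// data; a smaller index means the position wrapped around the ring.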
if ((*it)->log_entry_index < control_block_pos) {
ceph_assert((*it)->log_entry_index ==
- (control_block_pos + data_length + MIN_WRITE_ALLOC_SSD_SIZE)
- % this->m_log_pool_config_size + DATA_RING_BUFFER_OFFSET);
+ (control_block_pos + data_length + MIN_WRITE_ALLOC_SSD_SIZE) %
+ this->m_log_pool_size + DATA_RING_BUFFER_OFFSET);
} else {
ceph_assert((*it)->log_entry_index == control_block_pos +
data_length + MIN_WRITE_ALLOC_SSD_SIZE);
} else {
first_valid_entry = entry->log_entry_index + MIN_WRITE_ALLOC_SSD_SIZE;
}
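// As when appending, wrap the new first_valid_entry back into the data ring
// if it runs past the end of the pool.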
- if (first_valid_entry >= this->m_log_pool_config_size) {
- first_valid_entry = first_valid_entry % this->m_log_pool_config_size +
+ if (first_valid_entry >= this->m_log_pool_size) {
+ first_valid_entry = first_valid_entry % this->m_log_pool_size +
DATA_RING_BUFFER_OFFSET;
}
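// Retiring must have advanced the valid pointer; otherwise nothing was freed.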
ceph_assert(first_valid_entry != initial_first_valid_entry);