From: Alex Markuze
Date: Wed, 13 Aug 2025 14:53:51 +0000 (+0000)
Subject: add blog (binary logging) implementation
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=1dea94cd24560f6280752e2cdb6fa73bd0189cb0;p=ceph-client.git

add blog (binary logging) implementation
---

diff --git a/lib/blog/blog_batch.c b/lib/blog/blog_batch.c
index 3fda2ba30aeab..7acb5e6926c18 100644
--- a/lib/blog/blog_batch.c
+++ b/lib/blog/blog_batch.c
@@ -1,56 +1,249 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Binary Logging Batch Management - Stub Implementation
+ * Binary Logging Batch Management
+ *
+ * Migrated from ceph_san_batch.c with all algorithms preserved
+ * Implements per-CPU magazine-based batching for efficient object recycling
  */
 
-#include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
 #include <linux/blog.h>
 
+/* Number of magazines to preallocate during initialization */
+#define BLOG_INIT_MAGAZINES 4
+
+static struct blog_magazine *alloc_magazine(struct blog_batch *batch)
+{
+	struct blog_magazine *mag;
+
+	mag = kmem_cache_alloc(batch->magazine_cache, GFP_KERNEL);
+	if (!mag)
+		return NULL;
+
+	INIT_LIST_HEAD(&mag->list);
+	mag->count = 0;
+	return mag;
+}
+
+static void free_magazine(struct blog_batch *batch, struct blog_magazine *mag)
+{
+	kmem_cache_free(batch->magazine_cache, mag);
+}
+
 /**
  * blog_batch_init - Initialize the batching system
+ * @batch: Batch structure to initialize
+ *
+ * Allocates and initializes the per-CPU magazines and global pools.
+ *
+ * Return: 0 on success, negative error code on failure
  */
 int blog_batch_init(struct blog_batch *batch)
 {
-	/* Stub implementation */
-	if (!batch)
-		return -EINVAL;
-
+	int cpu, i;
+	struct blog_cpu_magazine *cpu_mag;
+	struct blog_magazine *mag;
+
+	/* Initialize counters */
+	batch->nr_full = 0;
+	batch->nr_empty = 0;
+
+	/* Create magazine cache */
+	batch->magazine_cache = kmem_cache_create("blog_magazine",
+						  sizeof(struct blog_magazine),
+						  0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!batch->magazine_cache)
+		return -ENOMEM;
+
+	/* Initialize global magazine lists */
 	INIT_LIST_HEAD(&batch->full_magazines);
 	INIT_LIST_HEAD(&batch->empty_magazines);
 	spin_lock_init(&batch->full_lock);
 	spin_lock_init(&batch->empty_lock);
-	batch->nr_full = 0;
-	batch->nr_empty = 0;
-
+
+	/* Allocate per-CPU magazines */
+	batch->cpu_magazines = alloc_percpu(struct blog_cpu_magazine);
+	if (!batch->cpu_magazines)
+		goto cleanup_cache;
+
+	/* Initialize per-CPU magazines */
+	for_each_possible_cpu(cpu) {
+		cpu_mag = per_cpu_ptr(batch->cpu_magazines, cpu);
+		cpu_mag->mag = NULL;
+	}
+
+	/* Pre-allocate empty magazines */
+	for (i = 0; i < BLOG_INIT_MAGAZINES; i++) {
+		mag = alloc_magazine(batch);
+		if (!mag)
+			goto cleanup;
+
+		spin_lock(&batch->empty_lock);
+		list_add(&mag->list, &batch->empty_magazines);
+		batch->nr_empty++;
+		spin_unlock(&batch->empty_lock);
+	}
+
 	return 0;
+
+cleanup:
+	blog_batch_cleanup(batch);
+	return -ENOMEM;
+
+cleanup_cache:
+	kmem_cache_destroy(batch->magazine_cache);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blog_batch_init);
 
 /**
  * blog_batch_cleanup - Clean up the batching system
+ * @batch: Batch structure to clean up
  */
 void blog_batch_cleanup(struct blog_batch *batch)
 {
-	/* Stub implementation */
+	int cpu;
+	struct blog_magazine *mag, *tmp;
+	struct blog_cpu_magazine *cpu_mag;
+
+	/* Free per-CPU magazines */
+	if (batch->cpu_magazines) {
+		for_each_possible_cpu(cpu) {
+			cpu_mag = per_cpu_ptr(batch->cpu_magazines, cpu);
+			if (cpu_mag->mag)
+				free_magazine(batch, cpu_mag->mag);
+		}
+		free_percpu(batch->cpu_magazines);
+	}
+
+	/* Free magazines in the full pool */
+	spin_lock(&batch->full_lock);
+	list_for_each_entry_safe(mag, tmp, &batch->full_magazines, list) {
+		list_del(&mag->list);
+		batch->nr_full--;
+		free_magazine(batch, mag);
+	}
+	spin_unlock(&batch->full_lock);
+
+	/* Free magazines in the empty pool */
+	spin_lock(&batch->empty_lock);
+	list_for_each_entry_safe(mag, tmp, &batch->empty_magazines, list) {
+		list_del(&mag->list);
+		batch->nr_empty--;
+		free_magazine(batch, mag);
+	}
+	spin_unlock(&batch->empty_lock);
+
+	/* Destroy magazine cache */
+	if (batch->magazine_cache)
+		kmem_cache_destroy(batch->magazine_cache);
 }
 EXPORT_SYMBOL(blog_batch_cleanup);
 
 /**
  * blog_batch_get - Get an element from the batch
+ * @batch: Batch to get element from
+ *
+ * Return: Element from the magazine, or NULL if none available
  */
 void *blog_batch_get(struct blog_batch *batch)
 {
-	/* Stub implementation */
-	return NULL;
+	struct blog_cpu_magazine *cpu_mag;
+	struct blog_magazine *old_mag, *new_mag;
+	void *element = NULL;
+
+	cpu_mag = this_cpu_ptr(batch->cpu_magazines);
+
+	/* If we have a magazine and it has elements, use it */
+	if (cpu_mag->mag && cpu_mag->mag->count > 0) {
+		element = cpu_mag->mag->elements[--cpu_mag->mag->count];
+		return element;
+	}
+
+	/* Current magazine is empty, try to get a full one */
+	old_mag = cpu_mag->mag;
+
+	/* Return old magazine to empty pool if we have one */
+	if (old_mag) {
+		spin_lock(&batch->empty_lock);
+		list_add(&old_mag->list, &batch->empty_magazines);
+		batch->nr_empty++;
+		spin_unlock(&batch->empty_lock);
+		cpu_mag->mag = NULL;
+	}
+
+	if (batch->nr_full > 0) {
+		/* Try to get a full magazine */
+		spin_lock(&batch->full_lock);
+		if (!list_empty(&batch->full_magazines)) {
+			new_mag = list_first_entry(&batch->full_magazines,
+						   struct blog_magazine, list);
+			list_del(&new_mag->list);
+			batch->nr_full--;
+			spin_unlock(&batch->full_lock);
+
+			cpu_mag->mag = new_mag;
+			if (new_mag->count > 0)
+				element = new_mag->elements[--new_mag->count];
+		} else {
+			spin_unlock(&batch->full_lock);
+		}
+	}
+	return element;
 }
 EXPORT_SYMBOL(blog_batch_get);
 
 /**
  * blog_batch_put - Put an element back into the batch
+ * @batch: Batch to put element into
+ * @element: Element to put back
  */
 void blog_batch_put(struct blog_batch *batch, void *element)
 {
-	/* Stub implementation */
+	struct blog_cpu_magazine *cpu_mag;
+	struct blog_magazine *mag;
+
+	cpu_mag = this_cpu_ptr(batch->cpu_magazines);
+
+	/* Optimistically try to add to current magazine */
+	if (likely(cpu_mag->mag && cpu_mag->mag->count < BLOG_MAGAZINE_SIZE)) {
+		cpu_mag->mag->elements[cpu_mag->mag->count++] = element;
+		return;
+	}
+
+	/* If current magazine is full, move it to full pool */
+	if (cpu_mag->mag && cpu_mag->mag->count >= BLOG_MAGAZINE_SIZE) {
+		spin_lock(&batch->full_lock);
+		list_add_tail(&cpu_mag->mag->list, &batch->full_magazines);
+		batch->nr_full++;
+		spin_unlock(&batch->full_lock);
+		cpu_mag->mag = NULL;
+	}
+
+	/* Get new magazine if needed */
+	if (!cpu_mag->mag) {
+		/* Try to get from empty pool first */
+		spin_lock(&batch->empty_lock);
+		if (!list_empty(&batch->empty_magazines)) {
+			mag = list_first_entry(&batch->empty_magazines,
+					       struct blog_magazine, list);
+			list_del(&mag->list);
+			batch->nr_empty--;
+			spin_unlock(&batch->empty_lock);
+			cpu_mag->mag = mag;
+		} else {
+			spin_unlock(&batch->empty_lock);
+			cpu_mag->mag = alloc_magazine(batch);
+		}
+
+		if (unlikely(!cpu_mag->mag))
+			return;
+	}
+
+	/* Add element to magazine */
+	cpu_mag->mag->elements[cpu_mag->mag->count++] = element;
 }
-EXPORT_SYMBOL(blog_batch_put);
+EXPORT_SYMBOL(blog_batch_put);
\ No newline at end of file
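
Reviewer sketch (not part of the patch): the magazine scheme above amortizes
lock traffic by moving whole magazines, not single objects, between the
per-CPU slot and the global full/empty pools. A minimal consumer could look
like the following; the <linux/blog.h> header name and the exact layout of
struct blog_batch are assumptions inferred from this file, not confirmed
definitions.

	#include <linux/blog.h>
	#include <linux/slab.h>

	static struct blog_batch obj_pool;	/* blog_batch_init() before use */

	static void *obj_get(void)
	{
		/* Fast path touches only this CPU's magazine; the slow path
		 * takes one spinlock to swap in a full magazine. */
		void *obj = blog_batch_get(&obj_pool);

		if (!obj)
			obj = kzalloc(128, GFP_KERNEL);	/* arbitrary size */
		return obj;
	}

	static void obj_put(void *obj)
	{
		/* Parks the object for reuse; nothing is freed here. */
		blog_batch_put(&obj_pool, obj);
	}

Note that blog_batch_cleanup() frees the magazines themselves but not the
parked elements, so an owner of the pooled objects must drain and free them
separately (blog_core.c does this via its global context list).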
diff --git a/lib/blog/blog_core.c b/lib/blog/blog_core.c
index e89db113ef853..7ea4b3d623b99 100644
--- a/lib/blog/blog_core.c
+++ b/lib/blog/blog_core.c
@@ -2,87 +2,480 @@
 /*
  * Binary Logging Infrastructure - Core Implementation
  *
- * This is a stub implementation for Phase 1 infrastructure setup.
- * Full implementation will be added in Phase 2.
+ * Migrated from ceph_san_logger.c with algorithms preserved
+ * Client ID management removed - modules handle their own mappings
  */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+
 #include <linux/blog.h>
+#include <linux/blog_batch.h>
+#include <linux/blog_pagefrag.h>
+#include <linux/blog_des.h>
+#include <linux/blog_tls.h>
+
+static void blog_tls_release_verbose(void *ptr);
+#define NULL_STR "(NULL)"
+#define BLOG_LOG_BATCH_MAX_FULL 16
 
 /* Global logger instance */
 struct blog_logger g_blog_logger;
 EXPORT_SYMBOL(g_blog_logger);
 
 /**
- * blog_init - Initialize the logging system
+ * blog_is_valid_kernel_addr - Check if address is in valid kernel address range
+ * @addr: Address to check
  *
- * Return: 0 on success, negative error code on failure
+ * Returns true if address is in valid kernel address range
  */
-int blog_init(void)
+bool blog_is_valid_kernel_addr(const void *addr)
 {
-	pr_info("BLOG: Binary logging infrastructure initialized (stub)\n");
-	return 0;
+	return virt_addr_valid(addr);
 }
-EXPORT_SYMBOL(blog_init);
+EXPORT_SYMBOL(blog_is_valid_kernel_addr);
 
 /**
- * blog_cleanup - Clean up the logging system
+ * get_context_id - Get a unique context ID
+ *
+ * Acquires a unique ID for a TLS context using the global counter
+ *
+ * Returns a unique context ID
  */
-void blog_cleanup(void)
+static u64 get_context_id(void)
 {
-	pr_info("BLOG: Binary logging infrastructure cleanup (stub)\n");
+	u64 id;
+
+	spin_lock(&g_blog_logger.ctx_id_lock);
+	id = g_blog_logger.next_ctx_id++;
+	spin_unlock(&g_blog_logger.ctx_id_lock);
+	return id;
 }
-EXPORT_SYMBOL(blog_cleanup);
 
 /**
- * blog_get_source_id - Get or create source ID
+ * validate_tls_ctx - Validate a TLS context
+ * @ctx: Context to validate
+ *
+ * Returns true if context is valid, false otherwise
  */
-u32 blog_get_source_id(const char *file, const char *func, unsigned int line, const char *fmt)
+static inline bool validate_tls_ctx(struct blog_tls_ctx *ctx)
 {
-	/* Stub implementation */
-	return 0;
+	if (!ctx)
+		return false;
+
+#if BLOG_DEBUG_POISON
+	if (ctx->debug_poison != BLOG_CTX_POISON) {
+		pr_err("BUG: TLS context id=%llu (%llx) has invalid debug_poison value 0x%llx\n",
+		       ctx->id, (unsigned long long)ctx,
+		       (unsigned long long)ctx->debug_poison);
+		return false;
+	}
+#endif
+
+	if (atomic_read(&ctx->refcount) != 1) {
+		pr_err("BUG: TLS context id=%llu (%llx) refcount %d, expected 1\n",
+		       ctx->id, (unsigned long long)ctx, atomic_read(&ctx->refcount));
+		return false;
+	}
+
+	return true;
+}
+
+static inline struct blog_tls_ctx *get_tls_ctx(void)
+{
+	struct blog_tls_ctx *ctx = current->tls_ctx;
+
+	if (likely(ctx)) {
+		ctx = container_of((void *)ctx, struct blog_tls_ctx, release);
+	}
+	return ctx;
 }
-EXPORT_SYMBOL(blog_get_source_id);
 
 /**
- * blog_get_source_info - Get source information for ID
+ * add_context_to_global_list - Add a context to the global list
+ * @ctx: The context to add to the global list
+ *
+ * Adds the context to the global list of contexts and updates stats
  */
-struct blog_source_info *blog_get_source_info(u32 id)
+static void add_context_to_global_list(struct blog_tls_ctx *ctx)
 {
-	/* Stub implementation */
-	return NULL;
+	spin_lock(&g_blog_logger.lock);
+	list_add(&ctx->list, &g_blog_logger.contexts);
+	g_blog_logger.total_contexts_allocated++;
+	spin_unlock(&g_blog_logger.lock);
+}
+
+static void *alloc_tls_ctx(void)
+{
+	struct blog_tls_ctx *ctx;
+
+	ctx = kmem_cache_alloc(g_blog_logger.alloc_batch.magazine_cache, GFP_KERNEL);
+	if (!ctx) {
+		pr_err("Failed to allocate TLS context from magazine cache\n");
+		return NULL;
+	}
+
+	/* Initialize pagefrag */
+	memset(&ctx->pf, 0, sizeof(ctx->pf));
+	if (blog_pagefrag_init(&ctx->pf)) {
+		pr_err("Failed to initialize pagefrag for TLS context\n");
+		kmem_cache_free(g_blog_logger.alloc_batch.magazine_cache, ctx);
+		return NULL;
+	}
+
+	/* Assign unique ID and initialize debug poison */
+#if BLOG_DEBUG_POISON
+	ctx->debug_poison = BLOG_CTX_POISON;
+#endif
+	atomic_set(&ctx->refcount, 0);
+	ctx->id = get_context_id();
+	add_context_to_global_list(ctx);
+
+	ctx->release = blog_tls_release_verbose;
+
+	pr_debug("[%d]blog: initialized refcount=0 for new context id=%llu (%llx)\n",
+		 smp_processor_id(), ctx->id, (unsigned long long)ctx);
+
+	return ctx;
+}
+
+static inline struct blog_tls_ctx *get_new_ctx(void)
+{
+	struct blog_tls_ctx *ctx;
+
+	/* Try to get context from batch first */
+	ctx = blog_batch_get(&g_blog_logger.alloc_batch);
+	if (!ctx) {
+		/* Create new context if batch is empty */
+		ctx = alloc_tls_ctx();
+		if (!ctx)
+			return NULL;
+	}
+
+#if BLOG_DEBUG_POISON
+	/* Verify debug poison on context from batch or fresh allocation */
+	if (ctx->debug_poison != BLOG_CTX_POISON) {
+		pr_err("BUG: Context id=%llu from batch/alloc has invalid debug_poison 0x%llx\n",
+		       ctx->id, (unsigned long long)ctx->debug_poison);
+		BUG();
+	}
+#endif
+
+	ctx->base_jiffies = jiffies;
+	blog_pagefrag_reset(&ctx->pf);
+	blog_logger_print_stats(&g_blog_logger);
+	return ctx;	/* Context returned with refcount = 0 */
 }
-EXPORT_SYMBOL(blog_get_source_info);
 
 /**
- * blog_log - Log a message
+ * is_valid_active_ctx - Validate an active TLS context
+ * @ctx: Context to validate
+ * @context_description: String describing the context for error messages
+ *
+ * Returns true if context is valid (poison OK, refcount == 1), false otherwise
  */
-void* blog_log(u32 source_id, u8 client_id, size_t needed_size)
+static inline bool is_valid_active_ctx(struct blog_tls_ctx *ctx, const char *context_description)
 {
-	/* Stub implementation */
-	return NULL;
+	if (!ctx) {
+		pr_err("BUG: %s context is NULL.\n", context_description);
+		return false;
+	}
+
+#if BLOG_DEBUG_POISON
+	if (ctx->debug_poison != BLOG_CTX_POISON) {
+		pr_err("BUG: %s context id=%llu (%llx) has invalid debug_poison value 0x%llx\n",
+		       context_description, ctx->id, (unsigned long long)ctx,
+		       (unsigned long long)ctx->debug_poison);
+		return false;
+	}
+#endif
+
+	if (atomic_read(&ctx->refcount) != 1) {
+		pr_err("BUG: %s context id=%llu (%llx) refcount %d, expected 1\n",
+		       context_description, ctx->id, (unsigned long long)ctx,
+		       atomic_read(&ctx->refcount));
+		return false;
+	}
+	return true;
+}
+
+static void free_tls_ctx(void *ptr)
+{
+	struct blog_tls_ctx *ctx = ptr;
+
+	if (!ctx) {
+		pr_err("BUG: Trying to free NULL TLS context\n");
+		return;
+	}
+
+#if BLOG_DEBUG_POISON
+	if (ctx->debug_poison != BLOG_CTX_POISON) {
+		pr_err("BUG: TLS context id=%llu has invalid debug_poison value 0x%llx\n",
+		       ctx->id, (unsigned long long)ctx->debug_poison);
+		return;
+	}
+#endif
+
+	if (atomic_read(&ctx->refcount) != 0) {
+		pr_err("BUG: Freeing TLS context id=%llu with non-zero refcount %d\n",
+		       ctx->id, atomic_read(&ctx->refcount));
+		return;
+	}
+
+	pr_debug("blog: freeing context id=%llu\n", ctx->id);
+	blog_pagefrag_deinit(&ctx->pf);
+	kmem_cache_free(g_blog_logger.alloc_batch.magazine_cache, ctx);
+}
+
+/* Release function for TLS storage */
+static void blog_tls_release(void *ptr)
+{
+	struct blog_tls_ctx *ctx = ptr;
+
+	if (!ctx)
+		return;
+
+	if (atomic_dec_return(&ctx->refcount) != 0) {
+		pr_err("BUG: TLS context id=%llu refcount %d after release\n",
+		       ctx->id, atomic_read(&ctx->refcount));
+		panic("blog: TLS context id=%llu refcount %d after release\n",
+		      ctx->id, atomic_read(&ctx->refcount));
+	}
+	pr_debug("blog: decremented refcount=0 for context id=%llu\n", ctx->id);
+
+	/* Add context to log batch */
+	ctx->task = NULL;
+	pr_debug("blog: releasing TLS context for pid %d [%s]\n",
+		 ctx->pid, ctx->comm);
+	blog_batch_put(&g_blog_logger.log_batch, ctx);
+
+	/* If log_batch has too many full magazines, move one to alloc_batch */
+	if (g_blog_logger.log_batch.nr_full > BLOG_LOG_BATCH_MAX_FULL) {
+		struct blog_magazine *mag;
+
+		spin_lock(&g_blog_logger.log_batch.full_lock);
+		if (!list_empty(&g_blog_logger.log_batch.full_magazines)) {
+			mag = list_first_entry(&g_blog_logger.log_batch.full_magazines,
+					       struct blog_magazine, list);
+			list_del(&mag->list);
+			g_blog_logger.log_batch.nr_full--;
+			spin_unlock(&g_blog_logger.log_batch.full_lock);
+
+			spin_lock(&g_blog_logger.alloc_batch.full_lock);
+			list_add(&mag->list, &g_blog_logger.alloc_batch.full_magazines);
+			g_blog_logger.alloc_batch.nr_full++;
+			spin_unlock(&g_blog_logger.alloc_batch.full_lock);
+		} else {
+			spin_unlock(&g_blog_logger.log_batch.full_lock);
+		}
+	}
+}
+
+static void blog_tls_release_verbose(void *ptr)
+{
+	struct blog_tls_ctx *ctx;
+
+	if (!ptr) {
+		pr_err("blog -- Callback : invalid TLS context pointer %d\n", current->pid);
+		return;
+	}
+	ctx = container_of(ptr, struct blog_tls_ctx, release);
+#if BLOG_DEBUG_POISON
+	if (ctx->debug_poison != BLOG_CTX_POISON) {
+		pr_err("blog -- Callback : invalid TLS context id=%llu has invalid debug_poison value 0x%llx\n",
+		       ctx->id, (unsigned long long)ctx->debug_poison);
+		BUG();
+	}
+#endif
+	if (atomic_read(&ctx->refcount) != 1) {
+		pr_err("blog -- Callback : invalid TLS context refcount %d for pid %d [%s]\n",
+		       atomic_read(&ctx->refcount), ctx->pid, ctx->comm);
+		BUG();
+	}
+	blog_tls_release(ctx);
 }
-EXPORT_SYMBOL(blog_log);
 
 /**
- * blog_get_tls_ctx - Get current TLS context
+ * blog_get_tls_ctx - Get or create TLS context for current task
+ *
+ * Returns pointer to TLS context or NULL on error
  */
 struct blog_tls_ctx *blog_get_tls_ctx(void)
 {
-	/* Stub implementation */
-	return NULL;
+	struct blog_tls_ctx *ctx = get_tls_ctx();
+
+	if (ctx) {
+		if (!is_valid_active_ctx(ctx, "Existing TLS")) {
+			current->tls_ctx = NULL;
+			BUG();
+		}
+		return ctx;
+	}
+
+	/* Create new context */
+	pr_debug("blog: creating new TLS context for pid %d [%s]\n",
+		 current->pid, current->comm);
+
+	ctx = get_new_ctx();
+	if (!ctx)
+		return NULL;
+
+	/* Set up TLS specific parts */
+	current->tls_ctx = (void *)&ctx->release;
+	ctx->task = current;
+	ctx->pid = current->pid;
+	strncpy(ctx->comm, current->comm, TASK_COMM_LEN);
+	ctx->comm[TASK_COMM_LEN - 1] = '\0';
+
+	/* Increment refcount from 0 to 1 */
+	if (atomic_inc_return(&ctx->refcount) != 1) {
+		pr_err("BUG: Failed to set refcount=1 for new TLS context id=%llu (was %d before inc)\n",
+		       ctx->id, atomic_read(&ctx->refcount) - 1);
+		current->tls_ctx = NULL;
+		BUG();
+	}
+
+	pr_debug("blog: successfully created new TLS context id=%llu for pid %d [%s]\n",
+		 ctx->id, ctx->pid, ctx->comm);
+	return ctx;
 }
 EXPORT_SYMBOL(blog_get_tls_ctx);
 
+/**
+ * blog_get_source_id - Get or create a source ID for the given location
+ * @file: Source file name
+ * @func: Function name
+ * @line: Line number
+ * @fmt: Format string
+ *
+ * Returns a unique ID for this source location
+ */
+u32 blog_get_source_id(const char *file, const char *func, unsigned int line, const char *fmt)
+{
+	u32 id = atomic_inc_return(&g_blog_logger.next_source_id);
+
+	if (id >= BLOG_MAX_SOURCE_IDS) {
+		/* If we run out of IDs, fall back to ID 1 */
+		pr_warn("blog: source ID overflow, reusing ID 1\n");
+		id = 1;
+	}
+
+	/* Store the source information in the global map */
+	g_blog_logger.source_map[id].file = file;
+	g_blog_logger.source_map[id].func = func;
+	g_blog_logger.source_map[id].line = line;
+	g_blog_logger.source_map[id].fmt = fmt;
+	g_blog_logger.source_map[id].warn_count = 0;
+	return id;
+}
+EXPORT_SYMBOL(blog_get_source_id);
+
+/**
+ * blog_get_source_info - Get source info for a given ID
+ * @id: Source ID
+ *
+ * Returns the source information for this ID
+ */
+struct blog_source_info *blog_get_source_info(u32 id)
+{
+	if (unlikely(id == 0 || id >= BLOG_MAX_SOURCE_IDS))
+		return NULL;
+	return &g_blog_logger.source_map[id];
+}
+EXPORT_SYMBOL(blog_get_source_info);
+
+/**
+ * blog_log - Log a message
+ * @source_id: Source ID for this location
+ * @client_id: Client ID for this message (module-specific)
+ * @needed_size: Size needed for the message payload
+ *
+ * Returns a buffer to write the message into, or NULL on failure
+ */
+void *blog_log(u32 source_id, u8 client_id, size_t needed_size)
+{
+	struct blog_tls_ctx *ctx;
+	struct blog_log_entry *entry = NULL;
+	int alloc;
+	int retry_count = 0;
+
+#if BLOG_TRACK_USAGE
+	struct blog_source_info *source;
+#endif
+	needed_size = round_up(needed_size + sizeof(struct blog_log_entry), 8);
+#if BLOG_TRACK_USAGE
+	/* Get source info to update stats */
+	source = blog_get_source_info(source_id);
+	if (likely(source)) {
+		if (in_serving_softirq()) {
+			atomic_inc(&source->napi_usage);
+			atomic_add(needed_size, &source->napi_bytes);
+		} else {
+			atomic_inc(&source->task_usage);
+			atomic_add(needed_size, &source->task_bytes);
+		}
+	}
+#endif
+
+	while (entry == NULL) {
+		ctx = blog_get_ctx();
+		if (!ctx) {
+			pr_err("Failed to get TLS context\n");
+			return NULL;
+		}
+		if (!blog_is_valid_kernel_addr(ctx)) {
+			pr_err("blog_log: invalid TLS context address: %pK\n", ctx);
+			return NULL;
+		}
+		if (unlikely(retry_count)) {
+			pr_debug("[%d]Retrying allocation with ctx %llu (%s, pid %d) (retry %d, needed_size=%zu @ %d)\n",
+				 smp_processor_id(), ctx->id, ctx->comm, ctx->pid,
+				 retry_count, needed_size, source_id);
+		}
+
+		alloc = blog_pagefrag_alloc(&ctx->pf, needed_size);
+		if (alloc == -ENOMEM) {
+			pr_debug("blog_log: allocation failed (needed %zu), resetting context\n",
+				 needed_size);
+			blog_pagefrag_reset(&ctx->pf);
+			retry_count++;
+			if (retry_count > 3) {
+				pr_err("blog_log: failed to allocate after 3 retries\n");
+				return NULL;
+			}
+			continue;
+		}
+
+		entry = blog_pagefrag_get_ptr(&ctx->pf, alloc);
+		if (!entry) {
+			pr_err("blog_log: failed to get pointer from pagefrag\n");
+			return NULL;
+		}
+		ctx->pf.last_entry = entry;
+	}
+
+#if BLOG_DEBUG_POISON
+	entry->debug_poison = BLOG_LOG_ENTRY_POISON;
+#endif
+	entry->ts_delta = (u32)(jiffies - ctx->base_jiffies);
+	entry->source_id = (u16)source_id;
+	entry->client_id = client_id;
+	/* Store the payload length; the entry header is accounted separately */
+	entry->len = (u8)(needed_size - sizeof(struct blog_log_entry));
+	return entry->buffer;
+}
+EXPORT_SYMBOL(blog_log);
+
 /**
  * blog_get_napi_ctx - Get NAPI context for current CPU
  */
 struct blog_tls_ctx *blog_get_napi_ctx(void)
 {
-	/* Stub implementation */
+	/* NAPI context implementation would go here */
+	/* For now, return NULL as it's not critical for basic operation */
 	return NULL;
 }
 EXPORT_SYMBOL(blog_get_napi_ctx);
@@ -92,7 +485,7 @@ EXPORT_SYMBOL(blog_get_napi_ctx);
  */
 void blog_set_napi_ctx(struct blog_tls_ctx *ctx)
 {
-	/* Stub implementation */
+	/* NAPI context implementation would go here */
 }
 EXPORT_SYMBOL(blog_set_napi_ctx);
 
@@ -101,8 +494,10 @@ EXPORT_SYMBOL(blog_set_napi_ctx);
  */
 struct blog_tls_ctx *blog_get_ctx(void)
 {
-	/* Stub implementation */
-	return NULL;
+	if (in_serving_softirq()) {
+		return blog_get_napi_ctx();
+	}
+	return blog_get_tls_ctx();
 }
 EXPORT_SYMBOL(blog_get_ctx);
 
@@ -111,7 +506,11 @@ EXPORT_SYMBOL(blog_get_ctx);
  */
 int blog_log_trim(unsigned int n)
 {
-	/* Stub implementation */
+	struct blog_tls_ctx *ctx = blog_get_ctx();
+	if (!ctx)
+		return -EINVAL;
+
+	blog_pagefrag_trim(&ctx->pf, n);
 	return 0;
 }
 EXPORT_SYMBOL(blog_log_trim);
@@ -121,7 +520,14 @@ EXPORT_SYMBOL(blog_log_trim);
  */
 void blog_log_iter_init(struct blog_log_iter *iter, struct blog_pagefrag *pf)
 {
-	/* Stub implementation */
+	if (!iter || !pf)
+		return;
+
+	iter->pf = pf;
+	iter->current_offset = 0;
+	iter->end_offset = pf->head;
+	iter->prev_offset = 0;
+	iter->steps = 0;
 }
 EXPORT_SYMBOL(blog_log_iter_init);
 
@@ -130,8 +536,20 @@ EXPORT_SYMBOL(blog_log_iter_init);
  */
 struct blog_log_entry *blog_log_iter_next(struct blog_log_iter *iter)
 {
-	/* Stub implementation */
-	return NULL;
+	struct blog_log_entry *entry;
+
+	if (!iter || iter->current_offset >= iter->end_offset)
+		return NULL;
+
+	entry = blog_pagefrag_get_ptr(iter->pf, iter->current_offset);
+	if (!entry)
+		return NULL;
+
+	iter->prev_offset = iter->current_offset;
+	iter->current_offset += round_up(sizeof(struct blog_log_entry) + entry->len, 8);
+	iter->steps++;
+
+	return entry;
 }
 EXPORT_SYMBOL(blog_log_iter_next);
 
@@ -141,20 +559,107 @@ EXPORT_SYMBOL(blog_log_iter_next);
  */
 int blog_des_entry(struct blog_log_entry *entry, char *output, size_t out_size,
 		   blog_client_des_fn client_cb)
 {
-	/* Stub implementation */
-	return 0;
+	int len = 0;
+	struct blog_source_info *source;
+
+	if (!entry || !output)
+		return -EINVAL;
+
+	/* Let module handle client_id if callback provided */
+	if (client_cb) {
+		len = client_cb(output, out_size, entry->client_id);
+		if (len < 0)
+			return len;
+	}
+
+	/* Get source info */
+	source = blog_get_source_info(entry->source_id);
+	if (!source) {
+		len += scnprintf(output + len, out_size - len,
+				 "[unknown source %u]", entry->source_id);
+		return len;
+	}
+
+	/* Add source location */
+	len += scnprintf(output + len, out_size - len, "[%s:%s:%u] ",
+			 source->file, source->func, source->line);
+
+	/* Deserialize the buffer content */
+	len += blog_des_reconstruct(source->fmt, entry->buffer, 0, entry->len,
+				    output + len, out_size - len);
+
+	return len;
 }
 EXPORT_SYMBOL(blog_des_entry);
 
 /**
- * blog_is_valid_kernel_addr - Check if address is valid
+ * blog_init - Initialize the logging system
+ *
+ * Return: 0 on success, negative error code on failure
  */
-bool blog_is_valid_kernel_addr(const void *addr)
+int blog_init(void)
 {
-	/* Stub implementation */
-	return virt_addr_valid(addr);
+	int ret;
+
+	/* Initialize global logger structure */
+	INIT_LIST_HEAD(&g_blog_logger.contexts);
+	spin_lock_init(&g_blog_logger.lock);
+	spin_lock_init(&g_blog_logger.source_lock);
+	spin_lock_init(&g_blog_logger.ctx_id_lock);
+
+	atomic_set(&g_blog_logger.next_source_id, 0);
+	g_blog_logger.total_contexts_allocated = 0;
+	g_blog_logger.next_ctx_id = 1;
+
+	/* Initialize batch systems */
+	ret = blog_batch_init(&g_blog_logger.alloc_batch);
+	if (ret) {
+		pr_err("blog: Failed to initialize alloc batch\n");
+		return ret;
+	}
+
+	ret = blog_batch_init(&g_blog_logger.log_batch);
+	if (ret) {
+		pr_err("blog: Failed to initialize log batch\n");
+		blog_batch_cleanup(&g_blog_logger.alloc_batch);
+		return ret;
+	}
+
+	/* Allocate per-CPU NAPI contexts (optional for now) */
+	g_blog_logger.napi_ctxs = NULL;	/* Will implement later if needed */
+
+	pr_info("BLOG: Binary logging infrastructure initialized\n");
+	return 0;
 }
-EXPORT_SYMBOL(blog_is_valid_kernel_addr);
+EXPORT_SYMBOL(blog_init);
+
+/**
+ * blog_cleanup - Clean up the logging system
+ */
+void blog_cleanup(void)
+{
+	struct blog_tls_ctx *ctx, *tmp;
+
+	/* Clean up any remaining contexts */
+	spin_lock(&g_blog_logger.lock);
+	list_for_each_entry_safe(ctx, tmp, &g_blog_logger.contexts, list) {
+		list_del(&ctx->list);
+		free_tls_ctx(ctx);
+	}
+	spin_unlock(&g_blog_logger.lock);
+
+	/* Clean up batch systems */
+	blog_batch_cleanup(&g_blog_logger.alloc_batch);
+	blog_batch_cleanup(&g_blog_logger.log_batch);
+
+	/* Free per-CPU NAPI contexts if allocated */
+	if (g_blog_logger.napi_ctxs) {
+		free_percpu(g_blog_logger.napi_ctxs);
+		g_blog_logger.napi_ctxs = NULL;
+	}
+
+	pr_info("BLOG: Binary logging infrastructure cleanup complete\n");
+}
+EXPORT_SYMBOL(blog_cleanup);
 
 static int __init blog_module_init(void)
 {
@@ -171,4 +676,4 @@ module_exit(blog_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Binary Logging Infrastructure");
-MODULE_AUTHOR("Linux Kernel Community");
+MODULE_AUTHOR("Linux Kernel Community");
\ No newline at end of file
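
Reviewer sketch (not part of the patch): blog_log() hands back a raw payload
buffer and defers all formatting to read time (blog_des_entry), so a call
site pays only for one pagefrag bump allocation and a few stores. The
caching macro below is hypothetical - the real one presumably lives in the
blog header, which this patch does not show:

	/* Log one u64 in binary form; formatted only when dumped. */
	#define BLOG_LOG_U64(client_id, fmt, val)			\
	do {								\
		static u32 __blog_src_id;				\
		u64 *__p;						\
									\
		if (unlikely(!__blog_src_id))				\
			__blog_src_id = blog_get_source_id(__FILE__,	\
					__func__, __LINE__, (fmt));	\
		__p = blog_log(__blog_src_id, (client_id),		\
			       sizeof(u64));				\
		if (__p)						\
			*__p = (val);					\
	} while (0)

The static-local caching also explains why blog_get_source_id() gets away
without deduplication: each call site allocates its ID exactly once. The
unsynchronized source_map stores can still race if two CPUs hit the same
fresh call site simultaneously; source_lock exists but is unused here, which
may be worth addressing in a follow-up.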
diff --git a/lib/blog/blog_pagefrag.c b/lib/blog/blog_pagefrag.c
index 61622dcb4f876..48e29c28df089 100644
--- a/lib/blog/blog_pagefrag.c
+++ b/lib/blog/blog_pagefrag.c
@@ -1,104 +1,207 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Binary Logging Page Fragment Management - Stub Implementation
+ * Binary Logging Page Fragment Management
+ *
+ * Migrated from ceph_san_pagefrag.c with all algorithms preserved
  */
 
-#include <linux/errno.h>
+#include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
 #include <linux/blog.h>
 
+/**
+ * blog_pagefrag_init - Initialize the pagefrag allocator.
+ * @pf: pagefrag allocator to initialize
+ *
+ * Allocates a 512KB contiguous buffer and resets the head pointer.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
 int blog_pagefrag_init(struct blog_pagefrag *pf)
 {
-	/* Stub implementation */
-	if (!pf)
-		return -EINVAL;
-
-	memset(pf, 0, sizeof(*pf));
 	spin_lock_init(&pf->lock);
+	pf->pages = alloc_pages(GFP_KERNEL, get_order(BLOG_PAGEFRAG_SIZE));
+	if (!pf->pages) {
+		pr_err("blog_pagefrag_init: alloc_pages failed\n");
+		return -ENOMEM;
+	}
+
+	pf->buffer = page_address(pf->pages);
+	pf->head = 0;
+	pf->active_elements = 0;
+	pf->alloc_count = 0;
+	pf->last_entry = NULL;
+	memset(pf->buffer, 0xc, BLOG_PAGEFRAG_SIZE);
+	pr_debug("blog_pagefrag_init: buffer range %llx - %llx\n",
+		 (unsigned long long)pf->buffer,
+		 (unsigned long long)pf->buffer + BLOG_PAGEFRAG_SIZE);
 	return 0;
 }
 EXPORT_SYMBOL(blog_pagefrag_init);
 
+/**
+ * blog_pagefrag_init_with_buffer - Initialize pagefrag with an existing buffer
+ * @pf: pagefrag allocator to initialize
+ * @buffer: pre-allocated buffer to use
+ * @size: size of the buffer
+ *
+ * Return: 0 on success
+ */
 int blog_pagefrag_init_with_buffer(struct blog_pagefrag *pf, void *buffer, size_t size)
 {
-	/* Stub implementation */
-	if (!pf || !buffer)
-		return -EINVAL;
-
-	memset(pf, 0, sizeof(*pf));
 	spin_lock_init(&pf->lock);
+	pf->pages = NULL;	/* No pages allocated, using provided buffer */
 	pf->buffer = buffer;
+	pf->head = 0;
+	pf->active_elements = 0;
+	pf->alloc_count = 0;
+	pf->last_entry = NULL;
 	return 0;
 }
 EXPORT_SYMBOL(blog_pagefrag_init_with_buffer);
 
+/**
+ * blog_pagefrag_alloc - Allocate bytes from the pagefrag buffer.
+ * @pf: pagefrag allocator
+ * @n: number of bytes to allocate.
+ *
+ * Allocates @n bytes if there is sufficient free space in the buffer and
+ * advances the head pointer by @n bytes. There is no wraparound: the caller
+ * is expected to reset the pagefrag once the buffer is exhausted.
+ *
+ * Return: offset of the allocated memory, or -ENOMEM if not enough space.
+ */
 int blog_pagefrag_alloc(struct blog_pagefrag *pf, unsigned int n)
 {
-	/* Stub implementation */
-	return 0;
+	u64 offset;
+
+	if (pf->head + n > BLOG_PAGEFRAG_SIZE)
+		return -ENOMEM;	/* No space left */
+
+	offset = pf->head;
+	pf->head += n;
+	pf->alloc_count++;
+	pf->active_elements++;
+	return offset;
 }
 EXPORT_SYMBOL(blog_pagefrag_alloc);
 
+/**
+ * blog_pagefrag_get_ptr - Get buffer pointer from pagefrag allocation result
+ * @pf: pagefrag allocator
+ * @val: return value from blog_pagefrag_alloc
+ *
+ * Return: pointer to allocated buffer region
+ */
+void *blog_pagefrag_get_ptr(struct blog_pagefrag *pf, u64 val)
+{
+	void *rc = (void *)(pf->buffer + val);
+
+	if (unlikely(pf->pages && pf->buffer != page_address(pf->pages))) {
+		pr_err("blog_pagefrag_get_ptr: invalid buffer pointer %llx @ %s\n",
+		       (unsigned long long)pf->buffer, current->comm);
+		BUG();
+	}
+	if (unlikely(rc < pf->buffer || rc >= (pf->buffer + BLOG_PAGEFRAG_SIZE))) {
+		pr_err("blog_pagefrag_get_ptr: invalid pointer %llx\n",
+		       (unsigned long long)rc);
+		BUG();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(blog_pagefrag_get_ptr);
+
+/**
+ * blog_pagefrag_get_ptr_from_tail - Get pointer from tail (not implemented in original)
+ */
 void *blog_pagefrag_get_ptr_from_tail(struct blog_pagefrag *pf)
 {
-	/* Stub implementation */
+	/* This function was not in the original ceph_san implementation */
 	return NULL;
 }
 EXPORT_SYMBOL(blog_pagefrag_get_ptr_from_tail);
 
+/**
+ * blog_pagefrag_free - Free bytes from pagefrag (not implemented in original)
+ */
 void blog_pagefrag_free(struct blog_pagefrag *pf, unsigned int n)
 {
-	/* Stub implementation */
+	/* This function was not in the original ceph_san implementation */
 }
 EXPORT_SYMBOL(blog_pagefrag_free);
 
+/**
+ * blog_pagefrag_deinit - Deinitialize the pagefrag allocator.
+ * @pf: pagefrag allocator to tear down
+ *
+ * Frees the allocated buffer and resets the head pointer.
+ */
 void blog_pagefrag_deinit(struct blog_pagefrag *pf)
 {
-	/* Stub implementation */
+	if (pf->pages) {
+		__free_pages(pf->pages, get_order(BLOG_PAGEFRAG_SIZE));
+		pf->pages = NULL;
+	}
+	/* Don't free buffer if it was provided externally */
+	pf->buffer = NULL;
+	pf->head = 0;
 }
 EXPORT_SYMBOL(blog_pagefrag_deinit);
 
+/**
+ * blog_pagefrag_reset - Reset the pagefrag allocator.
+ * @pf: pagefrag allocator to reset
+ *
+ * Resets the head pointer and counters to the beginning of the buffer.
+ */
 void blog_pagefrag_reset(struct blog_pagefrag *pf)
 {
-	/* Stub implementation */
-	if (pf) {
-		pf->head = 0;
-		pf->alloc_count = 0;
-		pf->active_elements = 0;
-		pf->last_entry = NULL;
-	}
+	spin_lock(&pf->lock);
+	pf->head = 0;
+	pf->active_elements = 0;
+	pf->alloc_count = 0;
+	pf->last_entry = NULL;
+	spin_unlock(&pf->lock);
 }
 EXPORT_SYMBOL(blog_pagefrag_reset);
 
-void *blog_pagefrag_get_ptr(struct blog_pagefrag *pf, u64 val)
+/**
+ * blog_pagefrag_trim_head - Trim bytes from head
+ * @pf: pagefrag allocator
+ * @n: number of bytes to trim
+ */
+void blog_pagefrag_trim_head(struct blog_pagefrag *pf, unsigned int n)
 {
-	/* Stub implementation */
-	return NULL;
+	if (n > pf->head)
+		pf->head = 0;
+	else
+		pf->head -= n;
 }
-EXPORT_SYMBOL(blog_pagefrag_get_ptr);
+EXPORT_SYMBOL(blog_pagefrag_trim_head);
+
+/**
+ * blog_pagefrag_trim - Trim bytes from pagefrag
+ * @pf: pagefrag allocator
+ * @n: number of bytes to trim from the head
+ */
+void blog_pagefrag_trim(struct blog_pagefrag *pf, unsigned int n)
+{
+	if (n >= pf->head) {
+		pf->head = 0;
+		pf->active_elements = 0;
+		pf->alloc_count = 0;
+		pf->last_entry = NULL;
+	} else {
+		pf->head -= n;
+	}
+}
+EXPORT_SYMBOL(blog_pagefrag_trim);
 
+/**
+ * blog_pagefrag_is_wraparound - Check if allocation wrapped around
+ */
 bool blog_pagefrag_is_wraparound(u64 val)
 {
-	/* Stub implementation */
+	/* Not implemented in original - stub for now */
 	return false;
 }
 EXPORT_SYMBOL(blog_pagefrag_is_wraparound);
 
+/**
+ * blog_pagefrag_get_alloc_size - Get allocation size from result
+ */
 u64 blog_pagefrag_get_alloc_size(u64 val)
 {
-	/* Stub implementation */
+	/* Not implemented in original - stub for now */
 	return 0;
 }
 EXPORT_SYMBOL(blog_pagefrag_get_alloc_size);
-
-void blog_pagefrag_trim_head(struct blog_pagefrag *pf, unsigned int n)
-{
-	/* Stub implementation */
-}
-EXPORT_SYMBOL(blog_pagefrag_trim_head);
-
-void blog_pagefrag_trim(struct blog_pagefrag *pf, unsigned int n)
-{
-	/* Stub implementation */
-}
-EXPORT_SYMBOL(blog_pagefrag_trim);
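
Reviewer sketch (not part of the patch): draining one context combines the
pagefrag iterator with the deserializer from blog_core.c. The buffer size
and the pr_info sink are arbitrary choices for illustration:

	static void blog_dump_ctx(struct blog_tls_ctx *ctx)
	{
		struct blog_log_iter iter;
		struct blog_log_entry *entry;
		char line[256];

		blog_log_iter_init(&iter, &ctx->pf);
		while ((entry = blog_log_iter_next(&iter))) {
			/* NULL callback: no module-specific client_id text */
			if (blog_des_entry(entry, line, sizeof(line), NULL) > 0)
				pr_info("%s\n", line);
		}
	}

Because blog_log() stores entry->len as the payload size and the iterator
advances by round_up(sizeof(struct blog_log_entry) + len, 8), the walk lands
exactly on the 8-byte-aligned boundaries produced by blog_pagefrag_alloc().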