ring-buffer: Simplify functions with __free(kfree) to free allocations
author		Steven Rostedt <rostedt@goodmis.org>
		Tue, 27 May 2025 18:31:44 +0000 (14:31 -0400)
committer	Steven Rostedt (Google) <rostedt@goodmis.org>
		Thu, 29 May 2025 12:24:08 +0000 (08:24 -0400)
The function rb_allocate_cpu_buffer() allocates cpu_buffer and on error needs
to free it. Instead of relying on a goto label to free it on the failure
paths, use __free(kfree) and return directly on errors, and have the success
return use return_ptr(cpu_buffer).

The function alloc_buffer() allocates buffer and on error needs to free it.
Likewise, use __free(kfree) and return directly on errors, and have the
success return use return_ptr(buffer).
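
Both conversions follow the same pattern from <linux/cleanup.h>. A minimal
sketch of the idea (struct foo and foo_setup() are made-up names for
illustration, not the actual ring-buffer code):

	struct foo *foo_alloc(void)
	{
		struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		if (foo_setup(f) < 0)
			return NULL;	/* f is kfree()d automatically here */

		return_ptr(f);		/* transfers ownership to the caller */
	}

return_ptr(f) expands to "return no_free_ptr(f)", which NULLs the local
variable before returning its value, so the scope-exit kfree() becomes a
no-op and the allocation survives for the caller.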

The function __rb_map_vma() allocates a temporary array "pages". Have it
use __free(kfree) so the array is freed automatically on every return path
and no explicit kfree() is needed.
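
Unlike the two cases above, "pages" never leaves the function, so here the
scope-exit kfree() should fire on every return, success included. A sketch
under the same caveats (fill_pages() is a placeholder for the mapping loop):

	static int foo_map(struct vm_area_struct *vma, unsigned long nr_pages)
	{
		struct page **pages __free(kfree) = NULL;
		int err;

		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		err = fill_pages(pages, nr_pages);	/* placeholder helper */
		if (err)
			return err;		/* pages freed automatically */

		err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
		return err;			/* freed here as well */
	}

No "out:" label or explicit kfree(pages) is needed on any path.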

Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/20250527143144.6edc4625@gandalf.local.home
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

index ef1cca8125dfdbc71e87ef0d9c9cc073f971c582..295b6fbfa81ac51e041cf4be6ff1db62b132a554 100644 (file)
@@ -2226,7 +2226,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_per_cpu *cpu_buffer __free(kfree) = NULL;
        struct ring_buffer_cpu_meta *meta;
        struct buffer_page *bpage;
        struct page *page;
@@ -2252,7 +2252,7 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
-               goto fail_free_buffer;
+               return NULL;
 
        rb_check_bpage(cpu_buffer, bpage);
 
@@ -2318,13 +2318,11 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
                rb_head_page_activate(cpu_buffer);
        }
 
-       return cpu_buffer;
+       return_ptr(cpu_buffer);
 
  fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);
 
- fail_free_buffer:
-       kfree(cpu_buffer);
        return NULL;
 }
 
@@ -2359,7 +2357,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
                                         unsigned long scratch_size,
                                         struct lock_class_key *key)
 {
-       struct trace_buffer *buffer;
+       struct trace_buffer *buffer __free(kfree) = NULL;
        long nr_pages;
        int subbuf_size;
        int bsize;
@@ -2373,7 +2371,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
                return NULL;
 
        if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
-               goto fail_free_buffer;
+               return NULL;
 
        buffer->subbuf_order = order;
        subbuf_size = (PAGE_SIZE << order);
@@ -2472,7 +2470,7 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 
        mutex_init(&buffer->mutex);
 
-       return buffer;
+       return_ptr(buffer);
 
  fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
@@ -2484,8 +2482,6 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
  fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
 
- fail_free_buffer:
-       kfree(buffer);
        return NULL;
 }
 
@@ -7057,7 +7053,7 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 {
        unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
        unsigned int subbuf_pages, subbuf_order;
-       struct page **pages;
+       struct page **pages __free(kfree) = NULL;
        int p = 0, s = 0;
        int err;
 
@@ -7125,10 +7121,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
                struct page *page;
                int off = 0;
 
-               if (WARN_ON_ONCE(s >= nr_subbufs)) {
-                       err = -EINVAL;
-                       goto out;
-               }
+               if (WARN_ON_ONCE(s >= nr_subbufs))
+                       return -EINVAL;
 
                page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
 
@@ -7143,9 +7137,6 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 
        err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
 
-out:
-       kfree(pages);
-
        return err;
 }
 #else