ring-buffer: Make ring_buffer_{un}map() simpler with guard(mutex)
author    Steven Rostedt <rostedt@goodmis.org>
          Tue, 27 May 2025 16:20:09 +0000 (12:20 -0400)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
          Thu, 29 May 2025 12:24:08 +0000 (08:24 -0400)
Convert the taking of buffer->mutex and cpu_buffer->mapping_lock over to
guard(mutex), which releases the lock automatically on every exit path, and
simplify ring_buffer_map() and ring_buffer_unmap() by replacing their
goto-based unlock labels with direct returns.
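
For reference, guard(mutex) is built on the scope-based cleanup helpers in
<linux/cleanup.h>, with the mutex guard defined in <linux/mutex.h>: the lock
is taken where the guard is declared and dropped automatically when the
enclosing scope is left, so every early return unlocks without a goto. A
minimal sketch of the pattern (illustrative only; demo_lock and demo_update()
are hypothetical names, not part of this patch):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);	/* hypothetical lock, for illustration */

    static int demo_update(int val)
    {
            guard(mutex)(&demo_lock);	/* mutex_lock(); unlocks at scope exit */

            if (val < 0)
                    return -EINVAL;	/* lock released automatically here */

            /* ... critical section ... */
            return 0;			/* ... and here */
    }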

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/20250527122009.267efb72@gandalf.local.home
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

index 931bbcc6640f6a3b2953a61eba98176a0f26c19a..ef1cca8125dfdbc71e87ef0d9c9cc073f971c582 100644
@@ -7161,36 +7161,34 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags, *subbuf_ids;
-       int err = 0;
+       int err;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return -EINVAL;
 
        cpu_buffer = buffer->buffers[cpu];
 
-       mutex_lock(&cpu_buffer->mapping_lock);
+       guard(mutex)(&cpu_buffer->mapping_lock);
 
        if (cpu_buffer->user_mapped) {
                err = __rb_map_vma(cpu_buffer, vma);
                if (!err)
                        err = __rb_inc_dec_mapped(cpu_buffer, true);
-               mutex_unlock(&cpu_buffer->mapping_lock);
                return err;
        }
 
        /* prevent another thread from changing buffer/sub-buffer sizes */
-       mutex_lock(&buffer->mutex);
+       guard(mutex)(&buffer->mutex);
 
        err = rb_alloc_meta_page(cpu_buffer);
        if (err)
-               goto unlock;
+               return err;
 
        /* subbuf_ids include the reader while nr_pages does not */
        subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
        if (!subbuf_ids) {
                rb_free_meta_page(cpu_buffer);
-               err = -ENOMEM;
-               goto unlock;
+               return -ENOMEM;
        }
 
        atomic_inc(&cpu_buffer->resize_disabled);
@@ -7218,35 +7216,29 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
                atomic_dec(&cpu_buffer->resize_disabled);
        }
 
-unlock:
-       mutex_unlock(&buffer->mutex);
-       mutex_unlock(&cpu_buffer->mapping_lock);
-
-       return err;
+       return 0;
 }
 
 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
-       int err = 0;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return -EINVAL;
 
        cpu_buffer = buffer->buffers[cpu];
 
-       mutex_lock(&cpu_buffer->mapping_lock);
+       guard(mutex)(&cpu_buffer->mapping_lock);
 
        if (!cpu_buffer->user_mapped) {
-               err = -ENODEV;
-               goto out;
+               return -ENODEV;
        } else if (cpu_buffer->user_mapped > 1) {
                __rb_inc_dec_mapped(cpu_buffer, false);
-               goto out;
+               return 0;
        }
 
-       mutex_lock(&buffer->mutex);
+       guard(mutex)(&buffer->mutex);
        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
        /* This is the last user space mapping */
@@ -7261,12 +7253,7 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
        rb_free_meta_page(cpu_buffer);
        atomic_dec(&cpu_buffer->resize_disabled);
 
-       mutex_unlock(&buffer->mutex);
-
-out:
-       mutex_unlock(&cpu_buffer->mapping_lock);
-
-       return err;
+       return 0;
 }
 
 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
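
A note on the nested guards above: scope-based cleanups run in reverse order
of declaration, so in ring_buffer_map() the buffer->mutex guard is released
before the cpu_buffer->mapping_lock guard on every return, matching the
unlock order of the explicit mutex_unlock() calls this patch removes.
Schematically (illustrative only; outer_lock and inner_lock are hypothetical
names):

    guard(mutex)(&outer_lock);	/* taken first, released last */
    guard(mutex)(&inner_lock);	/* taken second, released first */

    return 0;			/* unlocks inner_lock, then outer_lock */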