ring-buffer: Fix a race between readers and resize checks
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 42227727a49dbc813f2de08c42e5e2c8b3cf044d..28853966aa9afff9fc6e9a6af90a3eec58902c60 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1460,6 +1460,11 @@ static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  *
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
+ *
+ * Callers of this function need to guarantee that the list of pages doesn't get
+ * modified during the check. In particular, if it's possible that the function
+ * is invoked with concurrent readers that can swap in a new reader page, then
+ * the caller should take cpu_buffer->reader_lock.
  */
 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -2210,8 +2215,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                 */
                synchronize_rcu();
                for_each_buffer_cpu(buffer, cpu) {
+                       unsigned long flags;
+
                        cpu_buffer = buffer->buffers[cpu];
+                       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                        rb_check_pages(cpu_buffer);
+                       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
                }
                atomic_dec(&buffer->record_disabled);
        }
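
For illustration, here is a minimal user-space model of the pattern the patch enforces. It is a sketch, not kernel code: every name in it (page_node, check_pages, reader_thread, checker_thread, the pthread mutex named reader_lock) is hypothetical. The checker thread plays the role of rb_check_pages() and walks a circular doubly linked list of pages; the reader thread models a reader swapping a spare reader page into the ring, which is exactly the concurrent modification the new comment warns about; both sides serialize on the same lock, standing in for cpu_buffer->reader_lock.

/*
 * User-space model only. The names below are illustrative, not the kernel's
 * API, and the pthread mutex stands in for cpu_buffer->reader_lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page_node {
	struct page_node *next;
	struct page_node *prev;
};

static struct page_node *head;   /* first page of the ring */
static struct page_node *spare;  /* detached "reader page" */
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool stop;

/* rb_check_pages() stand-in: every link must be mirrored by its neighbour. */
static void check_pages(void)
{
	struct page_node *p = head;

	do {
		if (p->next->prev != p || p->prev->next != p) {
			fprintf(stderr, "corrupted page list\n");
			abort();
		}
		p = p->next;
	} while (p != head);
}

/* Reader stand-in: splice the spare page into the ring in place of the head. */
static void *reader_thread(void *arg)
{
	while (!stop) {
		pthread_mutex_lock(&reader_lock);
		struct page_node *old = head, *prev = old->prev, *next = old->next;

		spare->prev = prev;
		spare->next = next;
		prev->next = spare;
		next->prev = spare;
		head = spare;
		old->next = old->prev = old;
		spare = old;
		pthread_mutex_unlock(&reader_lock);
	}
	return NULL;
}

/* Resize-path stand-in: validate the ring while readers may be running. */
static void *checker_thread(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&reader_lock);   /* the fix: hold the lock */
		check_pages();
		pthread_mutex_unlock(&reader_lock);
	}
	stop = true;
	return NULL;
}

int main(void)
{
	static struct page_node pages[4];

	for (int i = 0; i < 3; i++) {           /* three pages in a circular ring */
		pages[i].next = &pages[(i + 1) % 3];
		pages[i].prev = &pages[(i + 2) % 3];
	}
	head = &pages[0];
	pages[3].next = pages[3].prev = &pages[3];
	spare = &pages[3];                      /* plus one spare reader page */

	pthread_t r, c;
	pthread_create(&r, NULL, reader_thread, NULL);
	pthread_create(&c, NULL, checker_thread, NULL);
	pthread_join(c, NULL);
	pthread_join(r, NULL);
	puts("page list stayed consistent");
	return 0;
}

If the lock acquisition in checker_thread() is dropped, the walker can observe the list mid-splice and report corruption, which mirrors why the resize path above now wraps rb_check_pages() in raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags).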