Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jun 2009 17:56:46 +0000 (10:56 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jun 2009 17:56:46 +0000 (10:56 -0700)
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
  tracing/urgent: warn in case of ftrace_start_up imbalance
  tracing/urgent: fix unbalanced ftrace_start_up
  function-graph: add stack frame test
  function-graph: disable when both x86_32 and optimize for size are configured
  ring-buffer: have benchmark test print to trace buffer
  ring-buffer: do not grab locks in nmi
  ring-buffer: add locks around rb_per_cpu_empty
  ring-buffer: check for less than two in size allocation
  ring-buffer: remove useless compile check for buffer_page size
  ring-buffer: remove useless warn on check
  ring-buffer: use BUF_PAGE_HDR_SIZE in calculating index
  tracing: update sample event documentation
  tracing/filters: fix race between filter setting and module unload
  tracing/filters: free filter_string in destroy_preds()
  ring-buffer: use commit counters for commit pointer accounting
  ring-buffer: remove unused variable
  ring-buffer: have benchmark test handle discarded events
  ring-buffer: prevent adding write in discarded area
  tracing/filters: strloc should be unsigned short
  tracing/filters: operand can be negative
  ...

Fix up kmemcheck-induced conflict in kernel/trace/ring_buffer.c manually

1  2 
arch/powerpc/kernel/ftrace.c
arch/x86/Kconfig
arch/x86/kernel/entry_32.S
kernel/trace/Kconfig
kernel/trace/ring_buffer.c
kernel/trace/trace.c

Simple merge
Simple merge
Simple merge
Simple merge
index dc4dc70171ce2236236a396430f228f15b1a732d,589b3eedfa67a2f1b25b70684920d6c59018c646..04dac263825874d69f104df2f33cbd54433a2e40
@@@ -1171,6 -1137,59 +1138,60 @@@ static unsigned rb_calculate_event_leng
        return length;
  }
  
+ static inline void
+ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+             struct buffer_page *tail_page,
+             unsigned long tail, unsigned long length)
+ {
+       struct ring_buffer_event *event;
+       /*
+        * Only the event that crossed the page boundary
+        * must fill the old tail_page with padding.
+        */
+       if (tail >= BUF_PAGE_SIZE) {
+               local_sub(length, &tail_page->write);
+               return;
+       }
+       event = __rb_page_index(tail_page, tail);
++      kmemcheck_annotate_bitfield(event, bitfield);
+       /*
+        * If this event is bigger than the minimum size, then
+        * we need to be careful that we don't subtract the
+        * write counter enough to allow another writer to slip
+        * in on this page.
+        * We put in a discarded commit instead, to make sure
+        * that this space is not used again.
+        *
+        * If we are less than the minimum size, we don't need to
+        * worry about it.
+        */
+       if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+               /* No room for any events */
+               /* Mark the rest of the page with padding */
+               rb_event_set_padding(event);
+               /* Set the write back to the previous setting */
+               local_sub(length, &tail_page->write);
+               return;
+       }
+       /* Put in a discarded event */
+       event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+       event->type_len = RINGBUF_TYPE_PADDING;
+       /* time delta must be non zero */
+       event->time_delta = 1;
+       /* Account for this as an entry */
+       local_inc(&tail_page->entries);
+       local_inc(&cpu_buffer->entries);
+       /* Set write to end of buffer */
+       length = (tail + length) - BUF_PAGE_SIZE;
+       local_sub(length, &tail_page->write);
+ }
  
  static struct ring_buffer_event *
  rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
@@@ -1325,11 -1323,7 +1325,8 @@@ __rb_reserve_next(struct ring_buffer_pe
  
        /* We reserved something on the buffer */
  
-       if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-               return NULL;
        event = __rb_page_index(tail_page, tail);
 +      kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(event, type, length);
  
        /* The passed in type is zero for DATA */
Simple merge