ring-buffer: add reader lock
[linux-2.6-block.git] kernel/trace/ring_buffer.c
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
19#include "trace.h"
20
21/* Up this if you want to test the TIME_EXTENTS and normalization */
22#define DEBUG_SHIFT 0
23
24/* FIXME!!! */
25u64 ring_buffer_time_stamp(int cpu)
26{
27 /* shift to debug/test normalization and TIME_EXTENTS */
28 return sched_clock() << DEBUG_SHIFT;
29}
30
31void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
32{
33 /* Just for testing the normalize function and deltas */
34 *ts >>= DEBUG_SHIFT;
35}
36
37#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
38#define RB_ALIGNMENT_SHIFT 2
39#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
40#define RB_MAX_SMALL_DATA 28
41
42enum {
43 RB_LEN_TIME_EXTEND = 8,
44 RB_LEN_TIME_STAMP = 16,
45};
46
47/* inline for ring buffer fast paths */
48static inline unsigned
49rb_event_length(struct ring_buffer_event *event)
50{
51 unsigned length;
52
53 switch (event->type) {
54 case RINGBUF_TYPE_PADDING:
55 /* undefined */
56 return -1;
57
58 case RINGBUF_TYPE_TIME_EXTEND:
59 return RB_LEN_TIME_EXTEND;
60
61 case RINGBUF_TYPE_TIME_STAMP:
62 return RB_LEN_TIME_STAMP;
63
64 case RINGBUF_TYPE_DATA:
65 if (event->len)
66 length = event->len << RB_ALIGNMENT_SHIFT;
67 else
68 length = event->array[0];
69 return length + RB_EVNT_HDR_SIZE;
70 default:
71 BUG();
72 }
73 /* not hit */
74 return 0;
75}
76
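/*
 * Illustrative note on the encoding above: for a data event whose payload
 * fits in RB_MAX_SMALL_DATA, the length is stored as a count of 4-byte
 * units in event->len (e.g. a 12 byte payload gives len = 3, for a total
 * of 12 + RB_EVNT_HDR_SIZE bytes). Larger payloads set len = 0 and keep
 * the byte count in event->array[0] instead.
 */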
77/**
78 * ring_buffer_event_length - return the length of the event
79 * @event: the event to get the length of
80 */
81unsigned ring_buffer_event_length(struct ring_buffer_event *event)
82{
83 return rb_event_length(event);
84}
85
86/* inline for ring buffer fast paths */
87static inline void *
88rb_event_data(struct ring_buffer_event *event)
89{
90 BUG_ON(event->type != RINGBUF_TYPE_DATA);
91 /* If length is in len field, then array[0] has the data */
92 if (event->len)
93 return (void *)&event->array[0];
94 /* Otherwise length is in array[0] and array[1] has the data */
95 return (void *)&event->array[1];
96}
97
98/**
99 * ring_buffer_event_data - return the data of the event
100 * @event: the event to get the data from
101 */
102void *ring_buffer_event_data(struct ring_buffer_event *event)
103{
104 return rb_event_data(event);
105}
106
107#define for_each_buffer_cpu(buffer, cpu) \
108 for_each_cpu_mask(cpu, buffer->cpumask)
109
110#define TS_SHIFT 27
111#define TS_MASK ((1ULL << TS_SHIFT) - 1)
112#define TS_DELTA_TEST (~TS_MASK)
113
114/*
115 * This hack stolen from mm/slob.c.
116 * We can store per page timing information in the page frame of the page.
117 * Thanks to Peter Zijlstra for suggesting this idea.
118 */
119struct buffer_page {
120 u64 time_stamp; /* page time stamp */
121 local_t write; /* index for next write */
122 local_t commit; /* write committed index */
123 unsigned read; /* index for next read */
124 struct list_head list; /* list of free pages */
125 void *page; /* Actual data page */
126};
127
128/*
129 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
130 * this issue out.
131 */
132static inline void free_buffer_page(struct buffer_page *bpage)
133{
134 if (bpage->page)
135 free_page((unsigned long)bpage->page);
136 kfree(bpage);
137}
138
139/*
140 * We need to fit the time_stamp delta into 27 bits.
141 */
142static inline int test_time_stamp(u64 delta)
143{
144 if (delta & TS_DELTA_TEST)
145 return 1;
146 return 0;
147}
148
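/*
 * Illustrative sketch of how the 27-bit limit is used further down:
 * test_time_stamp() returns 1 when a delta does not fit in TS_SHIFT bits,
 * and in that case a time-extend event splits the delta across two fields:
 *
 *	event->time_delta = delta & TS_MASK;	(low 27 bits)
 *	event->array[0]   = delta >> TS_SHIFT;	(remaining high bits)
 */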
149#define BUF_PAGE_SIZE PAGE_SIZE
150
151/*
152 * head_page == tail_page && head == tail then buffer is empty.
153 */
154struct ring_buffer_per_cpu {
155 int cpu;
156 struct ring_buffer *buffer;
157 spinlock_t reader_lock; /* serialize readers */
158 raw_spinlock_t lock;
159 struct lock_class_key lock_key;
160 struct list_head pages;
161 struct buffer_page *head_page; /* read from head */
162 struct buffer_page *tail_page; /* write to tail */
163 struct buffer_page *commit_page; /* committed pages */
164 struct buffer_page *reader_page;
165 unsigned long overrun;
166 unsigned long entries;
167 u64 write_stamp;
168 u64 read_stamp;
169 atomic_t record_disabled;
170};
171
172struct ring_buffer {
173 unsigned long size;
174 unsigned pages;
175 unsigned flags;
176 int cpus;
177 cpumask_t cpumask;
178 atomic_t record_disabled;
179
180 struct mutex mutex;
181
182 struct ring_buffer_per_cpu **buffers;
183};
184
185struct ring_buffer_iter {
186 struct ring_buffer_per_cpu *cpu_buffer;
187 unsigned long head;
188 struct buffer_page *head_page;
189 u64 read_stamp;
190};
191
192/* buffer may be either ring_buffer or ring_buffer_per_cpu */
193#define RB_WARN_ON(buffer, cond) \
194 do { \
195 if (unlikely(cond)) { \
196 atomic_inc(&buffer->record_disabled); \
197 WARN_ON(1); \
198 } \
199 } while (0)
200
201#define RB_WARN_ON_RET(buffer, cond) \
202 do { \
203 if (unlikely(cond)) { \
204 atomic_inc(&buffer->record_disabled); \
205 WARN_ON(1); \
206 return; \
207 } \
208 } while (0)
209
210#define RB_WARN_ON_RET_INT(buffer, cond) \
211 do { \
212 if (unlikely(cond)) { \
213 atomic_inc(&buffer->record_disabled); \
214 WARN_ON(1); \
215 return -1; \
216 } \
217 } while (0)
218
219#define RB_WARN_ON_RET_NULL(buffer, cond) \
220 do { \
221 if (unlikely(cond)) { \
222 atomic_inc(&buffer->record_disabled); \
223 WARN_ON(1); \
224 return NULL; \
225 } \
226 } while (0)
227
228#define RB_WARN_ON_ONCE(buffer, cond) \
229 do { \
230 static int once; \
231 if (unlikely(cond) && !once) { \
232 once++; \
233 atomic_inc(&buffer->record_disabled); \
234 WARN_ON(1); \
235 } \
236 } while (0)
237
238/* buffer must be ring_buffer not per_cpu */
239#define RB_WARN_ON_UNLOCK(buffer, cond) \
240 do { \
241 if (unlikely(cond)) { \
242 mutex_unlock(&buffer->mutex); \
243 atomic_inc(&buffer->record_disabled); \
244 WARN_ON(1); \
245 return -1; \
246 } \
247 } while (0)
248
249/**
250 * check_pages - integrity check of buffer pages
251 * @cpu_buffer: CPU buffer with pages to test
252 *
253 * As a safety measure we check to make sure the data pages have not
254 * been corrupted.
255 */
256static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
257{
258 struct list_head *head = &cpu_buffer->pages;
259 struct buffer_page *page, *tmp;
260
261 RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
262 RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
263
264 list_for_each_entry_safe(page, tmp, head, list) {
265 RB_WARN_ON_RET_INT(cpu_buffer,
266 page->list.next->prev != &page->list);
267 RB_WARN_ON_RET_INT(cpu_buffer,
268 page->list.prev->next != &page->list);
269 }
270
271 return 0;
272}
273
274static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
275 unsigned nr_pages)
276{
277 struct list_head *head = &cpu_buffer->pages;
278 struct buffer_page *page, *tmp;
279 unsigned long addr;
280 LIST_HEAD(pages);
281 unsigned i;
282
283 for (i = 0; i < nr_pages; i++) {
284 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
285 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
286 if (!page)
287 goto free_pages;
288 list_add(&page->list, &pages);
289
290 addr = __get_free_page(GFP_KERNEL);
291 if (!addr)
292 goto free_pages;
293 page->page = (void *)addr;
294 }
295
296 list_splice(&pages, head);
297
298 rb_check_pages(cpu_buffer);
299
300 return 0;
301
302 free_pages:
303 list_for_each_entry_safe(page, tmp, &pages, list) {
304 list_del_init(&page->list);
305 free_buffer_page(page);
306 }
307 return -ENOMEM;
308}
309
310static struct ring_buffer_per_cpu *
311rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
312{
313 struct ring_buffer_per_cpu *cpu_buffer;
314 struct buffer_page *page;
315 unsigned long addr;
316 int ret;
317
318 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
319 GFP_KERNEL, cpu_to_node(cpu));
320 if (!cpu_buffer)
321 return NULL;
322
323 cpu_buffer->cpu = cpu;
324 cpu_buffer->buffer = buffer;
325 spin_lock_init(&cpu_buffer->reader_lock);
326 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
327 INIT_LIST_HEAD(&cpu_buffer->pages);
328
329 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
330 GFP_KERNEL, cpu_to_node(cpu));
331 if (!page)
332 goto fail_free_buffer;
333
334 cpu_buffer->reader_page = page;
335 addr = __get_free_page(GFP_KERNEL);
336 if (!addr)
337 goto fail_free_reader;
338 page->page = (void *)addr;
339
340 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
341
342 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
343 if (ret < 0)
344 goto fail_free_reader;
345
346 cpu_buffer->head_page
347 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
348 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
349
350 return cpu_buffer;
351
352 fail_free_reader:
353 free_buffer_page(cpu_buffer->reader_page);
354
355 fail_free_buffer:
356 kfree(cpu_buffer);
357 return NULL;
358}
359
360static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
361{
362 struct list_head *head = &cpu_buffer->pages;
363 struct buffer_page *page, *tmp;
364
365 list_del_init(&cpu_buffer->reader_page->list);
366 free_buffer_page(cpu_buffer->reader_page);
367
368 list_for_each_entry_safe(page, tmp, head, list) {
369 list_del_init(&page->list);
370 free_buffer_page(page);
371 }
372 kfree(cpu_buffer);
373}
374
375/*
376 * Causes compile errors if the struct buffer_page gets bigger
377 * than the struct page.
378 */
379extern int ring_buffer_page_too_big(void);
380
381/**
382 * ring_buffer_alloc - allocate a new ring_buffer
383 * @size: the size in bytes that is needed.
384 * @flags: attributes to set for the ring buffer.
385 *
386 * Currently the only flag that is available is the RB_FL_OVERWRITE
387 * flag. This flag means that the buffer will overwrite old data
388 * when the buffer wraps. If this flag is not set, the buffer will
389 * drop data when the tail hits the head.
390 */
391struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
392{
393 struct ring_buffer *buffer;
394 int bsize;
395 int cpu;
396
397 /* Paranoid! Optimizes out when all is well */
398 if (sizeof(struct buffer_page) > sizeof(struct page))
399 ring_buffer_page_too_big();
400
401
402 /* keep it in its own cache line */
403 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
404 GFP_KERNEL);
405 if (!buffer)
406 return NULL;
407
408 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
409 buffer->flags = flags;
410
411 /* need at least two pages */
412 if (buffer->pages == 1)
413 buffer->pages++;
414
415 buffer->cpumask = cpu_possible_map;
416 buffer->cpus = nr_cpu_ids;
417
418 bsize = sizeof(void *) * nr_cpu_ids;
419 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
420 GFP_KERNEL);
421 if (!buffer->buffers)
422 goto fail_free_buffer;
423
424 for_each_buffer_cpu(buffer, cpu) {
425 buffer->buffers[cpu] =
426 rb_allocate_cpu_buffer(buffer, cpu);
427 if (!buffer->buffers[cpu])
428 goto fail_free_buffers;
429 }
430
431 mutex_init(&buffer->mutex);
432
433 return buffer;
434
435 fail_free_buffers:
436 for_each_buffer_cpu(buffer, cpu) {
437 if (buffer->buffers[cpu])
438 rb_free_cpu_buffer(buffer->buffers[cpu]);
439 }
440 kfree(buffer->buffers);
441
442 fail_free_buffer:
443 kfree(buffer);
444 return NULL;
445}
446
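/*
 * Illustrative usage sketch (the size and flag choice are only an example):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */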
447/**
448 * ring_buffer_free - free a ring buffer.
449 * @buffer: the buffer to free.
450 */
451void
452ring_buffer_free(struct ring_buffer *buffer)
453{
454 int cpu;
455
456 for_each_buffer_cpu(buffer, cpu)
457 rb_free_cpu_buffer(buffer->buffers[cpu]);
458
459 kfree(buffer);
460}
461
462static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
463
464static void
465rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
466{
467 struct buffer_page *page;
468 struct list_head *p;
469 unsigned i;
470
471 atomic_inc(&cpu_buffer->record_disabled);
472 synchronize_sched();
473
474 for (i = 0; i < nr_pages; i++) {
475 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
476 p = cpu_buffer->pages.next;
477 page = list_entry(p, struct buffer_page, list);
478 list_del_init(&page->list);
479 free_buffer_page(page);
480 }
481 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
482
483 rb_reset_cpu(cpu_buffer);
484
485 rb_check_pages(cpu_buffer);
486
487 atomic_dec(&cpu_buffer->record_disabled);
488
489}
490
491static void
492rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
493 struct list_head *pages, unsigned nr_pages)
494{
495 struct buffer_page *page;
496 struct list_head *p;
497 unsigned i;
498
499 atomic_inc(&cpu_buffer->record_disabled);
500 synchronize_sched();
501
502 for (i = 0; i < nr_pages; i++) {
503 RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
504 p = pages->next;
505 page = list_entry(p, struct buffer_page, list);
506 list_del_init(&page->list);
507 list_add_tail(&page->list, &cpu_buffer->pages);
508 }
509 rb_reset_cpu(cpu_buffer);
510
511 rb_check_pages(cpu_buffer);
512
513 atomic_dec(&cpu_buffer->record_disabled);
514}
515
516/**
517 * ring_buffer_resize - resize the ring buffer
518 * @buffer: the buffer to resize.
519 * @size: the new size.
520 *
521 * The tracer is responsible for making sure that the buffer is
522 * not being used while changing the size.
523 * Note: We may be able to change the above requirement by using
524 * RCU synchronizations.
525 *
526 * Minimum size is 2 * BUF_PAGE_SIZE.
527 *
528 * Returns -1 on failure.
529 */
530int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
531{
532 struct ring_buffer_per_cpu *cpu_buffer;
533 unsigned nr_pages, rm_pages, new_pages;
534 struct buffer_page *page, *tmp;
535 unsigned long buffer_size;
536 unsigned long addr;
537 LIST_HEAD(pages);
538 int i, cpu;
539
540 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
541 size *= BUF_PAGE_SIZE;
542 buffer_size = buffer->pages * BUF_PAGE_SIZE;
543
544 /* we need a minimum of two pages */
545 if (size < BUF_PAGE_SIZE * 2)
546 size = BUF_PAGE_SIZE * 2;
547
548 if (size == buffer_size)
549 return size;
550
551 mutex_lock(&buffer->mutex);
552
553 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
554
555 if (size < buffer_size) {
556
557 /* easy case, just free pages */
558 RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
559
560 rm_pages = buffer->pages - nr_pages;
561
562 for_each_buffer_cpu(buffer, cpu) {
563 cpu_buffer = buffer->buffers[cpu];
564 rb_remove_pages(cpu_buffer, rm_pages);
565 }
566 goto out;
567 }
568
569 /*
570 * This is a bit more difficult. We only want to add pages
571 * when we can allocate enough for all CPUs. We do this
572 * by allocating all the pages and storing them on a local
573 * link list. If we succeed in our allocation, then we
574 * add these pages to the cpu_buffers. Otherwise we just free
575 * them all and return -ENOMEM;
576 */
577 RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
578
579 new_pages = nr_pages - buffer->pages;
580
581 for_each_buffer_cpu(buffer, cpu) {
582 for (i = 0; i < new_pages; i++) {
583 page = kzalloc_node(ALIGN(sizeof(*page),
584 cache_line_size()),
585 GFP_KERNEL, cpu_to_node(cpu));
586 if (!page)
587 goto free_pages;
588 list_add(&page->list, &pages);
589 addr = __get_free_page(GFP_KERNEL);
590 if (!addr)
591 goto free_pages;
592 page->page = (void *)addr;
593 }
594 }
595
596 for_each_buffer_cpu(buffer, cpu) {
597 cpu_buffer = buffer->buffers[cpu];
598 rb_insert_pages(cpu_buffer, &pages, new_pages);
599 }
600
601 RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
602
603 out:
604 buffer->pages = nr_pages;
605 mutex_unlock(&buffer->mutex);
606
607 return size;
608
609 free_pages:
610 list_for_each_entry_safe(page, tmp, &pages, list) {
611 list_del_init(&page->list);
612 free_buffer_page(page);
613 }
614 return -ENOMEM;
615}
616
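/*
 * Illustrative example of the rounding above, assuming 4K pages: a request
 * of 10000 bytes is rounded up to 3 pages, so on success the call returns
 * 12288; if the new page allocations fail, they are freed and -ENOMEM is
 * returned.
 *
 *	ret = ring_buffer_resize(buffer, 10000);
 */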
617static inline int rb_null_event(struct ring_buffer_event *event)
618{
619 return event->type == RINGBUF_TYPE_PADDING;
620}
621
622static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
623{
624 return page->page + index;
625}
626
627static inline struct ring_buffer_event *
628rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
629{
630 return __rb_page_index(cpu_buffer->reader_page,
631 cpu_buffer->reader_page->read);
632}
633
634static inline struct ring_buffer_event *
635rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
636{
637 return __rb_page_index(cpu_buffer->head_page,
638 cpu_buffer->head_page->read);
639}
640
641static inline struct ring_buffer_event *
642rb_iter_head_event(struct ring_buffer_iter *iter)
643{
644 return __rb_page_index(iter->head_page, iter->head);
645}
646
647static inline unsigned rb_page_write(struct buffer_page *bpage)
648{
649 return local_read(&bpage->write);
650}
651
652static inline unsigned rb_page_commit(struct buffer_page *bpage)
653{
654 return local_read(&bpage->commit);
655}
656
657/* Size is determined by what has been committed */
658static inline unsigned rb_page_size(struct buffer_page *bpage)
659{
660 return rb_page_commit(bpage);
661}
662
663static inline unsigned
664rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
665{
666 return rb_page_commit(cpu_buffer->commit_page);
667}
668
669static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
670{
671 return rb_page_commit(cpu_buffer->head_page);
672}
673
674/*
675 * When the tail hits the head and the buffer is in overwrite mode,
676 * the head jumps to the next page and all content on the previous
677 * page is discarded. But before doing so, we update the overrun
678 * variable of the buffer.
679 */
680static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
681{
682 struct ring_buffer_event *event;
683 unsigned long head;
684
685 for (head = 0; head < rb_head_size(cpu_buffer);
686 head += rb_event_length(event)) {
687
688 event = __rb_page_index(cpu_buffer->head_page, head);
689 RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
690 /* Only count data entries */
691 if (event->type != RINGBUF_TYPE_DATA)
692 continue;
693 cpu_buffer->overrun++;
694 cpu_buffer->entries--;
695 }
696}
697
698static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
699 struct buffer_page **page)
700{
701 struct list_head *p = (*page)->list.next;
702
703 if (p == &cpu_buffer->pages)
704 p = p->next;
705
706 *page = list_entry(p, struct buffer_page, list);
707}
708
709static inline unsigned
710rb_event_index(struct ring_buffer_event *event)
711{
712 unsigned long addr = (unsigned long)event;
713
714 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
715}
716
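/*
 * Worked example of the index calculation above (assuming BUF_PAGE_SIZE
 * equals PAGE_SIZE, as defined earlier): for an event at kernel address
 * 0x...d030 on a 4K page,
 *
 *	(addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE) == 0x30
 *
 * i.e. the event starts 48 bytes into the page data.
 */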
717static inline int
718rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
719 struct ring_buffer_event *event)
720{
721 unsigned long addr = (unsigned long)event;
722 unsigned long index;
723
724 index = rb_event_index(event);
725 addr &= PAGE_MASK;
726
727 return cpu_buffer->commit_page->page == (void *)addr &&
728 rb_commit_index(cpu_buffer) == index;
729}
730
731static inline void
732rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
733 struct ring_buffer_event *event)
734{
735 unsigned long addr = (unsigned long)event;
736 unsigned long index;
737
738 index = rb_event_index(event);
739 addr &= PAGE_MASK;
740
741 while (cpu_buffer->commit_page->page != (void *)addr) {
742 RB_WARN_ON(cpu_buffer,
743 cpu_buffer->commit_page == cpu_buffer->tail_page);
744 cpu_buffer->commit_page->commit =
745 cpu_buffer->commit_page->write;
746 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
747 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
748 }
749
750 /* Now set the commit to the event's index */
751 local_set(&cpu_buffer->commit_page->commit, index);
752}
753
754static inline void
755rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
756{
757 /*
758 * We only race with interrupts and NMIs on this CPU.
759 * If we own the commit event, then we can commit
760 * all others that interrupted us, since the interruptions
761 * are in stack format (they finish before they come
762 * back to us). This allows us to do a simple loop to
763 * assign the commit to the tail.
764 */
765 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
766 cpu_buffer->commit_page->commit =
767 cpu_buffer->commit_page->write;
768 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
769 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
770 /* add barrier to keep gcc from optimizing too much */
771 barrier();
772 }
773 while (rb_commit_index(cpu_buffer) !=
774 rb_page_write(cpu_buffer->commit_page)) {
775 cpu_buffer->commit_page->commit =
776 cpu_buffer->commit_page->write;
777 barrier();
778 }
779}
780
781static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
782{
783 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
784 cpu_buffer->reader_page->read = 0;
785}
786
787static inline void rb_inc_iter(struct ring_buffer_iter *iter)
788{
789 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
790
791 /*
792 * The iterator could be on the reader page (it starts there).
793 * But the head could have moved, since the reader was
794 * found. Check for this case and assign the iterator
795 * to the head page instead of next.
796 */
797 if (iter->head_page == cpu_buffer->reader_page)
798 iter->head_page = cpu_buffer->head_page;
799 else
800 rb_inc_page(cpu_buffer, &iter->head_page);
801
802 iter->read_stamp = iter->head_page->time_stamp;
803 iter->head = 0;
804}
805
806/**
807 * ring_buffer_update_event - update event type and data
808 * @event: the event to update
809 * @type: the type of event
810 * @length: the size of the event field in the ring buffer
811 *
812 * Update the type and data fields of the event. The length
813 * is the actual size that is written to the ring buffer,
814 * and with this, we can determine what to place into the
815 * data field.
816 */
817static inline void
818rb_update_event(struct ring_buffer_event *event,
819 unsigned type, unsigned length)
820{
821 event->type = type;
822
823 switch (type) {
824
825 case RINGBUF_TYPE_PADDING:
826 break;
827
828 case RINGBUF_TYPE_TIME_EXTEND:
829 event->len =
830 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
831 >> RB_ALIGNMENT_SHIFT;
832 break;
833
834 case RINGBUF_TYPE_TIME_STAMP:
835 event->len =
836 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
837 >> RB_ALIGNMENT_SHIFT;
838 break;
839
840 case RINGBUF_TYPE_DATA:
841 length -= RB_EVNT_HDR_SIZE;
842 if (length > RB_MAX_SMALL_DATA) {
843 event->len = 0;
844 event->array[0] = length;
845 } else
846 event->len =
847 (length + (RB_ALIGNMENT-1))
848 >> RB_ALIGNMENT_SHIFT;
849 break;
850 default:
851 BUG();
852 }
853}
854
855static inline unsigned rb_calculate_event_length(unsigned length)
856{
857 struct ring_buffer_event event; /* Used only for sizeof array */
858
859 /* zero length can cause confusion */
860 if (!length)
861 length = 1;
862
863 if (length > RB_MAX_SMALL_DATA)
864 length += sizeof(event.array[0]);
865
866 length += RB_EVNT_HDR_SIZE;
867 length = ALIGN(length, RB_ALIGNMENT);
868
869 return length;
870}
871
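/*
 * Worked example (assuming a 4 byte header and 4 byte array slots): a
 * 10 byte payload becomes 10 + RB_EVNT_HDR_SIZE = 14, aligned up to 16;
 * a 100 byte payload exceeds RB_MAX_SMALL_DATA, so one array slot is
 * added for the length word: 100 + 4 + RB_EVNT_HDR_SIZE = 108.
 */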
872static struct ring_buffer_event *
873__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
874 unsigned type, unsigned long length, u64 *ts)
875{
876 struct buffer_page *tail_page, *head_page, *reader_page;
877 unsigned long tail, write;
878 struct ring_buffer *buffer = cpu_buffer->buffer;
879 struct ring_buffer_event *event;
880 unsigned long flags;
881
882 tail_page = cpu_buffer->tail_page;
883 write = local_add_return(length, &tail_page->write);
884 tail = write - length;
885
886 /* See if we shot past the end of this buffer page */
887 if (write > BUF_PAGE_SIZE) {
888 struct buffer_page *next_page = tail_page;
889
890 local_irq_save(flags);
891 __raw_spin_lock(&cpu_buffer->lock);
892
893 rb_inc_page(cpu_buffer, &next_page);
894
895 head_page = cpu_buffer->head_page;
896 reader_page = cpu_buffer->reader_page;
897
898 /* we grabbed the lock before incrementing */
899 RB_WARN_ON(cpu_buffer, next_page == reader_page);
900
901 /*
902 * If for some reason, we had an interrupt storm that made
903 * it all the way around the buffer, bail, and warn
904 * about it.
905 */
906 if (unlikely(next_page == cpu_buffer->commit_page)) {
907 WARN_ON_ONCE(1);
908 goto out_unlock;
909 }
910
911 if (next_page == head_page) {
912 if (!(buffer->flags & RB_FL_OVERWRITE)) {
913 /* reset write */
914 if (tail <= BUF_PAGE_SIZE)
915 local_set(&tail_page->write, tail);
916 goto out_unlock;
917 }
918
919 /* tail_page has not moved yet? */
920 if (tail_page == cpu_buffer->tail_page) {
921 /* count overflows */
922 rb_update_overflow(cpu_buffer);
923
924 rb_inc_page(cpu_buffer, &head_page);
925 cpu_buffer->head_page = head_page;
926 cpu_buffer->head_page->read = 0;
927 }
928 }
929
930 /*
931 * If the tail page is still the same as what we think
932 * it is, then it is up to us to update the tail
933 * pointer.
934 */
935 if (tail_page == cpu_buffer->tail_page) {
936 local_set(&next_page->write, 0);
937 local_set(&next_page->commit, 0);
938 cpu_buffer->tail_page = next_page;
939
940 /* reread the time stamp */
941 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
942 cpu_buffer->tail_page->time_stamp = *ts;
943 }
944
945 /*
946 * The actual tail page has moved forward.
947 */
948 if (tail < BUF_PAGE_SIZE) {
949 /* Mark the rest of the page with padding */
950 event = __rb_page_index(tail_page, tail);
951 event->type = RINGBUF_TYPE_PADDING;
952 }
953
954 if (tail <= BUF_PAGE_SIZE)
955 /* Set the write back to the previous setting */
956 local_set(&tail_page->write, tail);
957
958 /*
959 * If this was a commit entry that failed,
960 * increment that too
961 */
962 if (tail_page == cpu_buffer->commit_page &&
963 tail == rb_commit_index(cpu_buffer)) {
964 rb_set_commit_to_write(cpu_buffer);
965 }
966
967 __raw_spin_unlock(&cpu_buffer->lock);
968 local_irq_restore(flags);
969
970 /* fail and let the caller try again */
971 return ERR_PTR(-EAGAIN);
972 }
973
974 /* We reserved something on the buffer */
975
976 RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
977
978 event = __rb_page_index(tail_page, tail);
979 rb_update_event(event, type, length);
980
981 /*
982 * If this is a commit and the tail is zero, then update
983 * this page's time stamp.
984 */
985 if (!tail && rb_is_commit(cpu_buffer, event))
986 cpu_buffer->commit_page->time_stamp = *ts;
987
988 return event;
989
990 out_unlock:
991 __raw_spin_unlock(&cpu_buffer->lock);
992 local_irq_restore(flags);
993 return NULL;
994}
995
996static int
997rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
998 u64 *ts, u64 *delta)
999{
1000 struct ring_buffer_event *event;
1001 static int once;
1002 int ret;
1003
1004 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1005 printk(KERN_WARNING "Delta way too big! %llu"
1006 " ts=%llu write stamp = %llu\n",
1007 (unsigned long long)*delta,
1008 (unsigned long long)*ts,
1009 (unsigned long long)cpu_buffer->write_stamp);
1010 WARN_ON(1);
1011 }
1012
1013 /*
1014 * The delta is too big, we need to add a
1015 * new timestamp.
1016 */
1017 event = __rb_reserve_next(cpu_buffer,
1018 RINGBUF_TYPE_TIME_EXTEND,
1019 RB_LEN_TIME_EXTEND,
1020 ts);
1021 if (!event)
1022 return -EBUSY;
1023
1024 if (PTR_ERR(event) == -EAGAIN)
1025 return -EAGAIN;
1026
1027 /* Only a committed time event can update the write stamp */
1028 if (rb_is_commit(cpu_buffer, event)) {
1029 /*
1030 * If this is the first on the page, then we need to
1031 * update the page itself, and just put in a zero.
1032 */
1033 if (rb_event_index(event)) {
1034 event->time_delta = *delta & TS_MASK;
1035 event->array[0] = *delta >> TS_SHIFT;
1036 } else {
1037 cpu_buffer->commit_page->time_stamp = *ts;
1038 event->time_delta = 0;
1039 event->array[0] = 0;
1040 }
1041 cpu_buffer->write_stamp = *ts;
1042 /* let the caller know this was the commit */
1043 ret = 1;
1044 } else {
1045 /* Darn, this is just wasted space */
1046 event->time_delta = 0;
1047 event->array[0] = 0;
1048 ret = 0;
1049 }
1050
1051 *delta = 0;
1052
1053 return ret;
1054}
1055
1056static struct ring_buffer_event *
1057rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1058 unsigned type, unsigned long length)
1059{
1060 struct ring_buffer_event *event;
1061 u64 ts, delta;
1062 int commit = 0;
1063 int nr_loops = 0;
1064
1065 again:
1066 /*
1067 * We allow for interrupts to reenter here and do a trace.
1068 * If one does, it will cause this original code to loop
1069 * back here. Even with heavy interrupts happening, this
1070 * should only happen a few times in a row. If this happens
1071 * 1000 times in a row, there must be either an interrupt
1072 * storm or we have something buggy.
1073 * Bail!
1074 */
1075 if (unlikely(++nr_loops > 1000)) {
1076 RB_WARN_ON(cpu_buffer, 1);
1077 return NULL;
1078 }
1079
1080 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1081
1082 /*
1083 * Only the first commit can update the timestamp.
1084 * Yes there is a race here. If an interrupt comes in
1085 * just after the conditional and it traces too, then it
1086 * will also check the deltas. More than one timestamp may
1087 * also be made. But only the entry that did the actual
1088 * commit will be something other than zero.
1089 */
1090 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1091 rb_page_write(cpu_buffer->tail_page) ==
1092 rb_commit_index(cpu_buffer)) {
1093
1094 delta = ts - cpu_buffer->write_stamp;
1095
1096 /* make sure this delta is calculated here */
1097 barrier();
1098
1099 /* Did the write stamp get updated already? */
1100 if (unlikely(ts < cpu_buffer->write_stamp))
1101 delta = 0;
1102
1103 if (test_time_stamp(delta)) {
1104
1105 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1106
1107 if (commit == -EBUSY)
1108 return NULL;
1109
1110 if (commit == -EAGAIN)
1111 goto again;
1112
1113 RB_WARN_ON(cpu_buffer, commit < 0);
1114 }
1115 } else
1116 /* Non commits have zero deltas */
1117 delta = 0;
1118
1119 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1120 if (PTR_ERR(event) == -EAGAIN)
1121 goto again;
1122
1123 if (!event) {
1124 if (unlikely(commit))
1125 /*
1126 * Ouch! We needed a timestamp and it was commited. But
1127 * we didn't get our event reserved.
1128 */
1129 rb_set_commit_to_write(cpu_buffer);
1130 return NULL;
1131 }
1132
1133 /*
1134 * If the timestamp was commited, make the commit our entry
1135 * now so that we will update it when needed.
1136 */
1137 if (commit)
1138 rb_set_commit_event(cpu_buffer, event);
1139 else if (!rb_is_commit(cpu_buffer, event))
1140 delta = 0;
1141
1142 event->time_delta = delta;
1143
1144 return event;
1145}
1146
1147static DEFINE_PER_CPU(int, rb_need_resched);
1148
1149/**
1150 * ring_buffer_lock_reserve - reserve a part of the buffer
1151 * @buffer: the ring buffer to reserve from
1152 * @length: the length of the data to reserve (excluding event header)
1153 * @flags: a pointer to save the interrupt flags
1154 *
1155 * Returns a reserved event on the ring buffer to copy directly to.
1156 * The user of this interface will need to get the body to write into
1157 * and can use the ring_buffer_event_data() interface.
1158 *
1159 * The length is the length of the data needed, not the event length
1160 * which also includes the event header.
1161 *
1162 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1163 * If NULL is returned, then nothing has been allocated or locked.
1164 */
1165struct ring_buffer_event *
1166ring_buffer_lock_reserve(struct ring_buffer *buffer,
1167 unsigned long length,
1168 unsigned long *flags)
1169{
1170 struct ring_buffer_per_cpu *cpu_buffer;
1171 struct ring_buffer_event *event;
1172 int cpu, resched;
1173
1174 if (atomic_read(&buffer->record_disabled))
1175 return NULL;
1176
1177 /* If we are tracing schedule, we don't want to recurse */
1178 resched = ftrace_preempt_disable();
1179
1180 cpu = raw_smp_processor_id();
1181
1182 if (!cpu_isset(cpu, buffer->cpumask))
1183 goto out;
1184
1185 cpu_buffer = buffer->buffers[cpu];
1186
1187 if (atomic_read(&cpu_buffer->record_disabled))
1188 goto out;
1189
1190 length = rb_calculate_event_length(length);
1191 if (length > BUF_PAGE_SIZE)
1192 goto out;
1193
1194 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1195 if (!event)
1196 goto out;
1197
1198 /*
1199 * Need to store resched state on this cpu.
1200 * Only the first needs to.
1201 */
1202
1203 if (preempt_count() == 1)
1204 per_cpu(rb_need_resched, cpu) = resched;
1205
1206 return event;
1207
1208 out:
1209 ftrace_preempt_enable(resched);
1210 return NULL;
1211}
1212
1213static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1214 struct ring_buffer_event *event)
1215{
1216 cpu_buffer->entries++;
1217
1218 /* Only process further if we own the commit */
1219 if (!rb_is_commit(cpu_buffer, event))
1220 return;
1221
1222 cpu_buffer->write_stamp += event->time_delta;
1223
1224 rb_set_commit_to_write(cpu_buffer);
1225}
1226
1227/**
1228 * ring_buffer_unlock_commit - commit a reserved event
1229 * @buffer: The buffer to commit to
1230 * @event: The event pointer to commit.
1231 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1232 *
1233 * This commits the data to the ring buffer, and releases any locks held.
1234 *
1235 * Must be paired with ring_buffer_lock_reserve.
1236 */
1237int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1238 struct ring_buffer_event *event,
1239 unsigned long flags)
1240{
1241 struct ring_buffer_per_cpu *cpu_buffer;
1242 int cpu = raw_smp_processor_id();
1243
1244 cpu_buffer = buffer->buffers[cpu];
1245
1246 rb_commit(cpu_buffer, event);
1247
1248 /*
1249 * Only the last preempt count needs to restore preemption.
1250 */
1251 if (preempt_count() == 1)
1252 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1253 else
1254 preempt_enable_no_resched_notrace();
1255
1256 return 0;
1257}
1258
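/*
 * Illustrative write-path sketch; "struct my_entry" and its fields are
 * assumptions made only for the example:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */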
1259/**
1260 * ring_buffer_write - write data to the buffer without reserving
1261 * @buffer: The ring buffer to write to.
1262 * @length: The length of the data being written (excluding the event header)
1263 * @data: The data to write to the buffer.
1264 *
1265 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1266 * one function. If you already have the data to write to the buffer, it
1267 * may be easier to simply call this function.
1268 *
1269 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1270 * and not the length of the event which would hold the header.
1271 */
1272int ring_buffer_write(struct ring_buffer *buffer,
1273 unsigned long length,
1274 void *data)
1275{
1276 struct ring_buffer_per_cpu *cpu_buffer;
1277 struct ring_buffer_event *event;
1278 unsigned long event_length;
1279 void *body;
1280 int ret = -EBUSY;
1281 int cpu, resched;
1282
1283 if (atomic_read(&buffer->record_disabled))
1284 return -EBUSY;
1285
1286 resched = ftrace_preempt_disable();
1287
1288 cpu = raw_smp_processor_id();
1289
1290 if (!cpu_isset(cpu, buffer->cpumask))
1291 goto out;
1292
1293 cpu_buffer = buffer->buffers[cpu];
1294
1295 if (atomic_read(&cpu_buffer->record_disabled))
1296 goto out;
1297
1298 event_length = rb_calculate_event_length(length);
1299 event = rb_reserve_next_event(cpu_buffer,
1300 RINGBUF_TYPE_DATA, event_length);
1301 if (!event)
1302 goto out;
1303
1304 body = rb_event_data(event);
1305
1306 memcpy(body, data, length);
1307
1308 rb_commit(cpu_buffer, event);
1309
1310 ret = 0;
1311 out:
1312 ftrace_preempt_enable(resched);
1313
1314 return ret;
1315}
1316
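/*
 * Illustrative sketch of the one-shot interface; "struct my_entry" is an
 * assumption for the example. A non-zero return means the event was
 * dropped (recording disabled or it did not fit):
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return;
 */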
1317static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1318{
1319 struct buffer_page *reader = cpu_buffer->reader_page;
1320 struct buffer_page *head = cpu_buffer->head_page;
1321 struct buffer_page *commit = cpu_buffer->commit_page;
1322
1323 return reader->read == rb_page_commit(reader) &&
1324 (commit == reader ||
1325 (commit == head &&
1326 head->read == rb_page_commit(commit)));
1327}
1328
1329/**
1330 * ring_buffer_record_disable - stop all writes into the buffer
1331 * @buffer: The ring buffer to stop writes to.
1332 *
1333 * This prevents all writes to the buffer. Any attempt to write
1334 * to the buffer after this will fail and return NULL.
1335 *
1336 * The caller should call synchronize_sched() after this.
1337 */
1338void ring_buffer_record_disable(struct ring_buffer *buffer)
1339{
1340 atomic_inc(&buffer->record_disabled);
1341}
1342
1343/**
1344 * ring_buffer_record_enable - enable writes to the buffer
1345 * @buffer: The ring buffer to enable writes
1346 *
1347 * Note, multiple disables will need the same number of enables
1348 * to truly enable the writing (much like preempt_disable).
1349 */
1350void ring_buffer_record_enable(struct ring_buffer *buffer)
1351{
1352 atomic_dec(&buffer->record_disabled);
1353}
1354
1355/**
1356 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1357 * @buffer: The ring buffer to stop writes to.
1358 * @cpu: The CPU buffer to stop
1359 *
1360 * This prevents all writes to the buffer. Any attempt to write
1361 * to the buffer after this will fail and return NULL.
1362 *
1363 * The caller should call synchronize_sched() after this.
1364 */
1365void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1366{
1367 struct ring_buffer_per_cpu *cpu_buffer;
1368
1369 if (!cpu_isset(cpu, buffer->cpumask))
1370 return;
1371
1372 cpu_buffer = buffer->buffers[cpu];
1373 atomic_inc(&cpu_buffer->record_disabled);
1374}
1375
1376/**
1377 * ring_buffer_record_enable_cpu - enable writes to the buffer
1378 * @buffer: The ring buffer to enable writes
1379 * @cpu: The CPU to enable.
1380 *
1381 * Note, multiple disables will need the same number of enables
1382 * to truly enable the writing (much like preempt_disable).
1383 */
1384void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1385{
1386 struct ring_buffer_per_cpu *cpu_buffer;
1387
1388 if (!cpu_isset(cpu, buffer->cpumask))
1389 return;
1390
1391 cpu_buffer = buffer->buffers[cpu];
1392 atomic_dec(&cpu_buffer->record_disabled);
1393}
1394
1395/**
1396 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1397 * @buffer: The ring buffer
1398 * @cpu: The per CPU buffer to get the entries from.
1399 */
1400unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1401{
1402 struct ring_buffer_per_cpu *cpu_buffer;
1403
1404 if (!cpu_isset(cpu, buffer->cpumask))
1405 return 0;
1406
1407 cpu_buffer = buffer->buffers[cpu];
1408 return cpu_buffer->entries;
1409}
1410
1411/**
1412 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1413 * @buffer: The ring buffer
1414 * @cpu: The per CPU buffer to get the number of overruns from
1415 */
1416unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1417{
1418 struct ring_buffer_per_cpu *cpu_buffer;
1419
1420 if (!cpu_isset(cpu, buffer->cpumask))
1421 return 0;
1422
1423 cpu_buffer = buffer->buffers[cpu];
1424 return cpu_buffer->overrun;
1425}
1426
1427/**
1428 * ring_buffer_entries - get the number of entries in a buffer
1429 * @buffer: The ring buffer
1430 *
1431 * Returns the total number of entries in the ring buffer
1432 * (all CPU entries)
1433 */
1434unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1435{
1436 struct ring_buffer_per_cpu *cpu_buffer;
1437 unsigned long entries = 0;
1438 int cpu;
1439
1440 /* if you care about this being correct, lock the buffer */
1441 for_each_buffer_cpu(buffer, cpu) {
1442 cpu_buffer = buffer->buffers[cpu];
1443 entries += cpu_buffer->entries;
1444 }
1445
1446 return entries;
1447}
1448
1449/**
1450 * ring_buffer_overruns - get the number of overruns in the buffer
1451 * @buffer: The ring buffer
1452 *
1453 * Returns the total number of overruns in the ring buffer
1454 * (all CPU entries)
1455 */
1456unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1457{
1458 struct ring_buffer_per_cpu *cpu_buffer;
1459 unsigned long overruns = 0;
1460 int cpu;
1461
1462 /* if you care about this being correct, lock the buffer */
1463 for_each_buffer_cpu(buffer, cpu) {
1464 cpu_buffer = buffer->buffers[cpu];
1465 overruns += cpu_buffer->overrun;
1466 }
1467
1468 return overruns;
1469}
1470
1471/**
1472 * ring_buffer_iter_reset - reset an iterator
1473 * @iter: The iterator to reset
1474 *
1475 * Resets the iterator, so that it will start from the beginning
1476 * again.
1477 */
1478void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1479{
1480 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1481 unsigned long flags;
1482
1483 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1484
1485 /* Iterator usage is expected to have record disabled */
1486 if (list_empty(&cpu_buffer->reader_page->list)) {
1487 iter->head_page = cpu_buffer->head_page;
1488 iter->head = cpu_buffer->head_page->read;
1489 } else {
1490 iter->head_page = cpu_buffer->reader_page;
1491 iter->head = cpu_buffer->reader_page->read;
1492 }
1493 if (iter->head)
1494 iter->read_stamp = cpu_buffer->read_stamp;
1495 else
1496 iter->read_stamp = iter->head_page->time_stamp;
1497
1498 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1499}
1500
1501/**
1502 * ring_buffer_iter_empty - check if an iterator has no more to read
1503 * @iter: The iterator to check
1504 */
1505int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1506{
1507 struct ring_buffer_per_cpu *cpu_buffer;
1508
1509 cpu_buffer = iter->cpu_buffer;
1510
1511 return iter->head_page == cpu_buffer->commit_page &&
1512 iter->head == rb_commit_index(cpu_buffer);
1513}
1514
1515static void
1516rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1517 struct ring_buffer_event *event)
1518{
1519 u64 delta;
1520
1521 switch (event->type) {
1522 case RINGBUF_TYPE_PADDING:
1523 return;
1524
1525 case RINGBUF_TYPE_TIME_EXTEND:
1526 delta = event->array[0];
1527 delta <<= TS_SHIFT;
1528 delta += event->time_delta;
1529 cpu_buffer->read_stamp += delta;
1530 return;
1531
1532 case RINGBUF_TYPE_TIME_STAMP:
1533 /* FIXME: not implemented */
1534 return;
1535
1536 case RINGBUF_TYPE_DATA:
1537 cpu_buffer->read_stamp += event->time_delta;
1538 return;
1539
1540 default:
1541 BUG();
1542 }
1543 return;
1544}
1545
1546static void
1547rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1548 struct ring_buffer_event *event)
1549{
1550 u64 delta;
1551
1552 switch (event->type) {
1553 case RINGBUF_TYPE_PADDING:
1554 return;
1555
1556 case RINGBUF_TYPE_TIME_EXTEND:
1557 delta = event->array[0];
1558 delta <<= TS_SHIFT;
1559 delta += event->time_delta;
1560 iter->read_stamp += delta;
1561 return;
1562
1563 case RINGBUF_TYPE_TIME_STAMP:
1564 /* FIXME: not implemented */
1565 return;
1566
1567 case RINGBUF_TYPE_DATA:
1568 iter->read_stamp += event->time_delta;
1569 return;
1570
1571 default:
1572 BUG();
1573 }
1574 return;
1575}
1576
1577static struct buffer_page *
1578rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1579{
1580 struct buffer_page *reader = NULL;
1581 unsigned long flags;
1582 int nr_loops = 0;
1583
1584 local_irq_save(flags);
1585 __raw_spin_lock(&cpu_buffer->lock);
1586
1587 again:
1588 /*
1589 * This should normally only loop twice. But because the
1590 * start of the reader inserts an empty page, it causes
1591 * a case where we will loop three times. There should be no
1592 * reason to loop four times (that I know of).
1593 */
1594 if (unlikely(++nr_loops > 3)) {
1595 RB_WARN_ON(cpu_buffer, 1);
1596 reader = NULL;
1597 goto out;
1598 }
1599
1600 reader = cpu_buffer->reader_page;
1601
1602 /* If there's more to read, return this page */
1603 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1604 goto out;
1605
1606 /* Never should we have an index greater than the size */
1607 RB_WARN_ON(cpu_buffer,
1608 cpu_buffer->reader_page->read > rb_page_size(reader));
1609
1610 /* check if we caught up to the tail */
1611 reader = NULL;
1612 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1613 goto out;
1614
1615 /*
1616 * Splice the empty reader page into the list around the head.
1617 * Reset the reader page to size zero.
7a8e76a3 1618 */
1619
1620 reader = cpu_buffer->head_page;
1621 cpu_buffer->reader_page->list.next = reader->list.next;
1622 cpu_buffer->reader_page->list.prev = reader->list.prev;
1623
1624 local_set(&cpu_buffer->reader_page->write, 0);
1625 local_set(&cpu_buffer->reader_page->commit, 0);
1626
1627 /* Make the reader page now replace the head */
1628 reader->list.prev->next = &cpu_buffer->reader_page->list;
1629 reader->list.next->prev = &cpu_buffer->reader_page->list;
1630
1631 /*
1632 * If the tail is on the reader, then we must set the head
1633 * to the inserted page, otherwise we set it one before.
7a8e76a3 1634 */
1635 cpu_buffer->head_page = cpu_buffer->reader_page;
1636
1637 if (cpu_buffer->commit_page != reader)
1638 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1639
1640 /* Finally update the reader page to the new head */
1641 cpu_buffer->reader_page = reader;
1642 rb_reset_reader_page(cpu_buffer);
1643
1644 goto again;
1645
1646 out:
1647 __raw_spin_unlock(&cpu_buffer->lock);
1648 local_irq_restore(flags);
1649
1650 return reader;
1651}
1652
1653static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1654{
1655 struct ring_buffer_event *event;
1656 struct buffer_page *reader;
1657 unsigned length;
1658
1659 reader = rb_get_reader_page(cpu_buffer);
1660
1661 /* This function should not be called when buffer is empty */
1662 RB_WARN_ON_RET(cpu_buffer, !reader);
1663
1664 event = rb_reader_event(cpu_buffer);
1665
1666 if (event->type == RINGBUF_TYPE_DATA)
1667 cpu_buffer->entries--;
1668
1669 rb_update_read_stamp(cpu_buffer, event);
1670
1671 length = rb_event_length(event);
1672 cpu_buffer->reader_page->read += length;
1673}
1674
1675static void rb_advance_iter(struct ring_buffer_iter *iter)
1676{
1677 struct ring_buffer *buffer;
1678 struct ring_buffer_per_cpu *cpu_buffer;
1679 struct ring_buffer_event *event;
1680 unsigned length;
1681
1682 cpu_buffer = iter->cpu_buffer;
1683 buffer = cpu_buffer->buffer;
1684
1685 /*
1686 * Check if we are at the end of the buffer.
1687 */
1688 if (iter->head >= rb_page_size(iter->head_page)) {
1689 RB_WARN_ON_RET(buffer,
1690 iter->head_page == cpu_buffer->commit_page);
1691 rb_inc_iter(iter);
1692 return;
1693 }
1694
1695 event = rb_iter_head_event(iter);
1696
1697 length = rb_event_length(event);
1698
1699 /*
1700 * This should not be called to advance the header if we are
1701 * at the tail of the buffer.
1702 */
1703 RB_WARN_ON_RET(cpu_buffer,
1704 (iter->head_page == cpu_buffer->commit_page) &&
1705 (iter->head + length > rb_commit_index(cpu_buffer)));
1706
1707 rb_update_iter_read_stamp(iter, event);
1708
1709 iter->head += length;
1710
1711 /* check for end of page padding */
1712 if ((iter->head >= rb_page_size(iter->head_page)) &&
1713 (iter->head_page != cpu_buffer->commit_page))
1714 rb_advance_iter(iter);
1715}
1716
1717static struct ring_buffer_event *
1718rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1719{
1720 struct ring_buffer_per_cpu *cpu_buffer;
1721 struct ring_buffer_event *event;
1722 struct buffer_page *reader;
1723 int nr_loops = 0;
1724
1725 if (!cpu_isset(cpu, buffer->cpumask))
1726 return NULL;
1727
1728 cpu_buffer = buffer->buffers[cpu];
1729
1730 again:
1731 /*
1732 * We repeat when a timestamp is encountered. It is possible
1733 * to get multiple timestamps from an interrupt entering just
1734 * as one timestamp is about to be written. The max times
1735 * that this can happen is the number of nested interrupts we
1736 * can have. Nesting 10 deep of interrupts is clearly
1737 * an anomaly.
1738 */
1739 if (unlikely(++nr_loops > 10)) {
1740 RB_WARN_ON(cpu_buffer, 1);
1741 return NULL;
1742 }
1743
1744 reader = rb_get_reader_page(cpu_buffer);
1745 if (!reader)
1746 return NULL;
1747
1748 event = rb_reader_event(cpu_buffer);
1749
1750 switch (event->type) {
1751 case RINGBUF_TYPE_PADDING:
1752 RB_WARN_ON(cpu_buffer, 1);
1753 rb_advance_reader(cpu_buffer);
1754 return NULL;
1755
1756 case RINGBUF_TYPE_TIME_EXTEND:
1757 /* Internal data, OK to advance */
1758 rb_advance_reader(cpu_buffer);
1759 goto again;
1760
1761 case RINGBUF_TYPE_TIME_STAMP:
1762 /* FIXME: not implemented */
1763 rb_advance_reader(cpu_buffer);
1764 goto again;
1765
1766 case RINGBUF_TYPE_DATA:
1767 if (ts) {
1768 *ts = cpu_buffer->read_stamp + event->time_delta;
1769 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1770 }
1771 return event;
1772
1773 default:
1774 BUG();
1775 }
1776
1777 return NULL;
1778}
1779
1780static struct ring_buffer_event *
1781rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1782{
1783 struct ring_buffer *buffer;
1784 struct ring_buffer_per_cpu *cpu_buffer;
1785 struct ring_buffer_event *event;
1786 int nr_loops = 0;
1787
1788 if (ring_buffer_iter_empty(iter))
1789 return NULL;
1790
1791 cpu_buffer = iter->cpu_buffer;
1792 buffer = cpu_buffer->buffer;
1793
1794 again:
1795 /*
1796 * We repeat when a timestamp is encountered. It is possible
1797 * to get multiple timestamps from an interrupt entering just
1798 * as one timestamp is about to be written. The max times
1799 * that this can happen is the number of nested interrupts we
1800 * can have. Nesting 10 deep of interrupts is clearly
1801 * an anomaly.
1802 */
1803 if (unlikely(++nr_loops > 10)) {
1804 RB_WARN_ON(cpu_buffer, 1);
1805 return NULL;
1806 }
1807
1808 if (rb_per_cpu_empty(cpu_buffer))
1809 return NULL;
1810
1811 event = rb_iter_head_event(iter);
1812
1813 switch (event->type) {
1814 case RINGBUF_TYPE_PADDING:
1815 rb_inc_iter(iter);
1816 goto again;
1817
1818 case RINGBUF_TYPE_TIME_EXTEND:
1819 /* Internal data, OK to advance */
1820 rb_advance_iter(iter);
1821 goto again;
1822
1823 case RINGBUF_TYPE_TIME_STAMP:
1824 /* FIXME: not implemented */
1825 rb_advance_iter(iter);
1826 goto again;
1827
1828 case RINGBUF_TYPE_DATA:
1829 if (ts) {
1830 *ts = iter->read_stamp + event->time_delta;
1831 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1832 }
1833 return event;
1834
1835 default:
1836 BUG();
1837 }
1838
1839 return NULL;
1840}
1841
1842/**
1843 * ring_buffer_peek - peek at the next event to be read
1844 * @buffer: The ring buffer to read
1845 * @cpu: The cpu to peek at
1846 * @ts: The timestamp counter of this event.
1847 *
1848 * This will return the event that will be read next, but does
1849 * not consume the data.
1850 */
1851struct ring_buffer_event *
1852ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1853{
1854 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1855 struct ring_buffer_event *event;
1856 unsigned long flags;
1857
1858 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1859 event = rb_buffer_peek(buffer, cpu, ts);
1860 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1861
1862 return event;
1863}
1864
1865/**
1866 * ring_buffer_iter_peek - peek at the next event to be read
1867 * @iter: The ring buffer iterator
1868 * @ts: The timestamp counter of this event.
1869 *
1870 * This will return the event that will be read next, but does
1871 * not increment the iterator.
1872 */
1873struct ring_buffer_event *
1874ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1875{
1876 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1877 struct ring_buffer_event *event;
1878 unsigned long flags;
1879
1880 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1881 event = rb_iter_peek(iter, ts);
1882 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1883
1884 return event;
1885}
1886
1887/**
1888 * ring_buffer_consume - return an event and consume it
1889 * @buffer: The ring buffer to get the next event from
1890 *
1891 * Returns the next event in the ring buffer, and that event is consumed.
1892 * Meaning, that sequential reads will keep returning a different event,
1893 * and eventually empty the ring buffer if the producer is slower.
1894 */
1895struct ring_buffer_event *
1896ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1897{
1898 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1899 struct ring_buffer_event *event;
1900 unsigned long flags;
1901
1902 if (!cpu_isset(cpu, buffer->cpumask))
1903 return NULL;
1904
1905 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1906
1907 event = rb_buffer_peek(buffer, cpu, ts);
1908 if (!event)
1909 goto out;
1910
1911 rb_advance_reader(cpu_buffer);
1912
1913 out:
1914 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1915
1916 return event;
1917}
1918
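/*
 * Illustrative consumer loop; "process()" is a placeholder for whatever
 * the caller does with each event:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event), ts);
 */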
1919/**
1920 * ring_buffer_read_start - start a non consuming read of the buffer
1921 * @buffer: The ring buffer to read from
1922 * @cpu: The cpu buffer to iterate over
1923 *
1924 * This starts up an iteration through the buffer. It also disables
1925 * the recording to the buffer until the reading is finished.
1926 * This prevents the reading from being corrupted. This is not
1927 * a consuming read, so a producer is not expected.
1928 *
1929 * Must be paired with ring_buffer_finish.
1930 */
1931struct ring_buffer_iter *
1932ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1933{
1934 struct ring_buffer_per_cpu *cpu_buffer;
1935 struct ring_buffer_iter *iter;
1936 unsigned long flags;
1937
1938 if (!cpu_isset(cpu, buffer->cpumask))
1939 return NULL;
1940
1941 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1942 if (!iter)
1943 return NULL;
1944
1945 cpu_buffer = buffer->buffers[cpu];
1946
1947 iter->cpu_buffer = cpu_buffer;
1948
1949 atomic_inc(&cpu_buffer->record_disabled);
1950 synchronize_sched();
1951
1952 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1953 __raw_spin_lock(&cpu_buffer->lock);
1954 ring_buffer_iter_reset(iter);
1955 __raw_spin_unlock(&cpu_buffer->lock);
1956 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1957
1958 return iter;
1959}
1960
1961/**
1962 * ring_buffer_finish - finish reading the iterator of the buffer
1963 * @iter: The iterator retrieved by ring_buffer_start
1964 *
1965 * This re-enables the recording to the buffer, and frees the
1966 * iterator.
1967 */
1968void
1969ring_buffer_read_finish(struct ring_buffer_iter *iter)
1970{
1971 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1972
1973 atomic_dec(&cpu_buffer->record_disabled);
1974 kfree(iter);
1975}
1976
1977/**
1978 * ring_buffer_read - read the next item in the ring buffer by the iterator
1979 * @iter: The ring buffer iterator
1980 * @ts: The time stamp of the event read.
1981 *
1982 * This reads the next event in the ring buffer and increments the iterator.
1983 */
1984struct ring_buffer_event *
1985ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1986{
1987 struct ring_buffer_event *event;
1988 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1989 unsigned long flags;
1990
1991 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1992 event = rb_iter_peek(iter, ts);
1993 if (!event)
1994 goto out;
1995
1996 rb_advance_iter(iter);
1997 out:
1998 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1999
2000 return event;
2001}
2002
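/*
 * Illustrative iterator loop; "inspect()" stands in for the caller's
 * handling of each event. Writes to this CPU buffer stay disabled between
 * read_start and read_finish:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		inspect(event, ts);
 *	ring_buffer_read_finish(iter);
 */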
2003/**
2004 * ring_buffer_size - return the size of the ring buffer (in bytes)
2005 * @buffer: The ring buffer.
2006 */
2007unsigned long ring_buffer_size(struct ring_buffer *buffer)
2008{
2009 return BUF_PAGE_SIZE * buffer->pages;
2010}
2011
2012static void
2013rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2014{
2015 cpu_buffer->head_page
2016 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2017 local_set(&cpu_buffer->head_page->write, 0);
2018 local_set(&cpu_buffer->head_page->commit, 0);
2019
2020 cpu_buffer->head_page->read = 0;
2021
2022 cpu_buffer->tail_page = cpu_buffer->head_page;
2023 cpu_buffer->commit_page = cpu_buffer->head_page;
2024
2025 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2026 local_set(&cpu_buffer->reader_page->write, 0);
2027 local_set(&cpu_buffer->reader_page->commit, 0);
2028 cpu_buffer->reader_page->read = 0;
2029
2030 cpu_buffer->overrun = 0;
2031 cpu_buffer->entries = 0;
2032}
2033
2034/**
2035 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2036 * @buffer: The ring buffer to reset a per cpu buffer of
2037 * @cpu: The CPU buffer to be reset
2038 */
2039void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2040{
2041 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2042 unsigned long flags;
2043
2044 if (!cpu_isset(cpu, buffer->cpumask))
2045 return;
2046
2047 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2048
2049 __raw_spin_lock(&cpu_buffer->lock);
2050
2051 rb_reset_cpu(cpu_buffer);
2052
2053 __raw_spin_unlock(&cpu_buffer->lock);
2054
2055 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2056}
2057
2058/**
2059 * ring_buffer_reset - reset a ring buffer
2060 * @buffer: The ring buffer to reset all cpu buffers
2061 */
2062void ring_buffer_reset(struct ring_buffer *buffer)
2063{
2064 int cpu;
2065
2066 for_each_buffer_cpu(buffer, cpu)
2067 ring_buffer_reset_cpu(buffer, cpu);
2068}
2069
2070/**
2071 * ring_buffer_empty - is the ring buffer empty?
2072 * @buffer: The ring buffer to test
2073 */
2074int ring_buffer_empty(struct ring_buffer *buffer)
2075{
2076 struct ring_buffer_per_cpu *cpu_buffer;
2077 int cpu;
2078
2079 /* yes this is racy, but if you don't like the race, lock the buffer */
2080 for_each_buffer_cpu(buffer, cpu) {
2081 cpu_buffer = buffer->buffers[cpu];
2082 if (!rb_per_cpu_empty(cpu_buffer))
2083 return 0;
2084 }
2085 return 1;
2086}
2087
2088/**
2089 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2090 * @buffer: The ring buffer
2091 * @cpu: The CPU buffer to test
2092 */
2093int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2094{
2095 struct ring_buffer_per_cpu *cpu_buffer;
2096
2097 if (!cpu_isset(cpu, buffer->cpumask))
2098 return 1;
2099
2100 cpu_buffer = buffer->buffers[cpu];
2101 return rb_per_cpu_empty(cpu_buffer);
2102}
2103
2104/**
2105 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2106 * @buffer_a: One buffer to swap with
2107 * @buffer_b: The other buffer to swap with
2108 *
2109 * This function is useful for tracers that want to take a "snapshot"
2110 * of a CPU buffer and has another back up buffer lying around.
2111 * it is expected that the tracer handles the cpu buffer not being
2112 * used at the moment.
2113 */
2114int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2115 struct ring_buffer *buffer_b, int cpu)
2116{
2117 struct ring_buffer_per_cpu *cpu_buffer_a;
2118 struct ring_buffer_per_cpu *cpu_buffer_b;
2119
2120 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2121 !cpu_isset(cpu, buffer_b->cpumask))
2122 return -EINVAL;
2123
2124 /* At least make sure the two buffers are somewhat the same */
2125 if (buffer_a->size != buffer_b->size ||
2126 buffer_a->pages != buffer_b->pages)
2127 return -EINVAL;
2128
2129 cpu_buffer_a = buffer_a->buffers[cpu];
2130 cpu_buffer_b = buffer_b->buffers[cpu];
2131
2132 /*
2133 * We can't do a synchronize_sched here because this
2134 * function can be called in atomic context.
2135 * Normally this will be called from the same CPU as cpu.
2136 * If not it's up to the caller to protect this.
2137 */
2138 atomic_inc(&cpu_buffer_a->record_disabled);
2139 atomic_inc(&cpu_buffer_b->record_disabled);
2140
2141 buffer_a->buffers[cpu] = cpu_buffer_b;
2142 buffer_b->buffers[cpu] = cpu_buffer_a;
2143
2144 cpu_buffer_b->buffer = buffer_a;
2145 cpu_buffer_a->buffer = buffer_b;
2146
2147 atomic_dec(&cpu_buffer_a->record_disabled);
2148 atomic_dec(&cpu_buffer_b->record_disabled);
2149
2150 return 0;
2151}
2152