ring-buffer: Fix resetting of shortest_full
kernel/trace/ring_buffer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local64.h>
#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We have to keep it up to
 * date manually.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp  : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP  = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

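/*
 * Worked example (illustrative note, not from the original source):
 * with RB_ALIGNMENT == 4, a data event carrying a 20 byte payload can
 * encode it as type_len = 5, so rb_event_data_length() returns
 * 5 * 4 + RB_EVNT_HDR_SIZE. A payload too large for the 5 bit
 * type_len field is stored with type_len = 0 and its length in
 * array[0], which is the second branch above.
 */
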
/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

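/*
 * Worked example (illustrative): rb_event_time_stamp() undoes the
 * TS_SHIFT split. A stamp whose bits above TS_SHIFT are 0x2 and whose
 * low 27 bits are 0x3 is stored as array[0] = 0x2, time_delta = 0x3,
 * and reassembled as (0x2 << 27) + 0x3 = 0x10000003.
 */
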
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

struct buffer_data_read_page {
	unsigned		order;	/* order of the page */
	struct buffer_data_page	*data;	/* actual data, stored in this page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	unsigned	 order;		/* order of the page */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

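/*
 * Worked example (illustrative): a raw write value of 0x100014
 * decodes as one in-flight updater (0x100014 >> 20 == 1) and a
 * write index of 0x14 (0x100014 & RB_WRITE_MASK). Adding
 * RB_WRITE_INTCNT bumps only the updater partition, leaving the
 * index intact.
 */
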
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_pages((unsigned long)bpage->page, bpage->order);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
	wait_queue_head_t	full_waiters;
	bool			waiters_pending;
	bool			full_waiters_pending;
	bool			wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE		= 0,
	RB_ADD_STAMP_EXTEND		= BIT(1),
	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
	RB_ADD_STAMP_FORCE		= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

struct rb_time_struct {
	local64_t	time;
};
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;

	unsigned int			subbuf_size;
	unsigned int			subbuf_order;
	unsigned int			max_data_size;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	size_t				event_size;
	int				missed_events;
};

int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)buffer->subbuf_size,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

static inline void rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. rb_fix_abs_ts() takes a previous full time stamp
 * and adds its 5 MSBs onto the saved absolute time stamp. The two
 * are then compared in case of the unlikely event that the latest
 * time stamp incremented the 5 MSBs.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

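/*
 * Worked example (illustrative): if the previous full time stamp was
 * save_ts = (1ULL << 59) | 0x1234 and the buffer saved the absolute
 * stamp abs = 0x5678 with the 5 MSBs dropped, rb_fix_abs_ts()
 * restores (1ULL << 59) | 0x5678. The overflow branch only fires if
 * the restored value is still below save_ts, meaning the lower
 * 59 bits wrapped between the two stamps.
 */
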
static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Really, though, neither
 * of the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	rb_time_read(&cpu_buffer->write_stamp, &ts);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	/*
	 * Add one as dirty will never equal nr_pages, as the sub-buffer
	 * that the writer is on is not counted as dirty.
	 * This is needed if "buffer_percent" is set to 100.
	 */
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

	return (dirty * 100) >= (full * nr_pages);
}

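/*
 * Worked example (illustrative): with nr_pages = 8 and full = 50
 * (wake readers once half the buffer has data), full_hit() is true
 * when dirty * 100 >= 50 * 8, i.e. when at least 4 sub-buffers
 * (counting the writer's page added above) contain data.
 */
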
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		/* Only cpu_buffer sets the above flags */
		struct ring_buffer_per_cpu *cpu_buffer =
			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);

		/* Called from interrupt context */
		raw_spin_lock(&cpu_buffer->reader_lock);
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;

		/* Waking up all waiters, they will reset the shortest full */
		cpu_buffer->shortest_full = 0;
		raw_spin_unlock(&cpu_buffer->reader_lock);

		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	/* This can be called in any context */
	irq_work_queue(&rbwork->work);
}

static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	bool ret = false;

	/* Reads of all CPUs always wait for any data */
	if (cpu == RING_BUFFER_ALL_CPUS)
		return !ring_buffer_empty(buffer);

	cpu_buffer = buffer->buffers[cpu];

	if (!ring_buffer_empty_cpu(buffer, cpu)) {
		unsigned long flags;
		bool pagebusy;

		if (!full)
			return true;

		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
		ret = !pagebusy && full_hit(buffer, cpu, full);

		if (!cpu_buffer->shortest_full ||
		    cpu_buffer->shortest_full > full)
			cpu_buffer->shortest_full = full;
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}
	return ret;
}

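/*
 * Note (illustrative summary, not from the original source):
 * shortest_full tracks the smallest "full" percentage any waiter is
 * blocked on, so writers only have to compare against one threshold.
 * With waiters at full = 25 and full = 75 it settles at 25, and
 * rb_wake_up_waiters() resets it to 0 under reader_lock once all
 * full_waiters have been woken.
 */
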
/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full)
		prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
	else
		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	if (full)
		work->full_waiters_pending = true;
	else
		work->waiters_pending = true;

	if (rb_watermark_hit(buffer, cpu, full))
		goto out;

	if (signal_pending(current)) {
		ret = -EINTR;
		goto out;
	}

	schedule();
 out:
	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		rbwork = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return EPOLLERR;

		cpu_buffer = buffer->buffers[cpu];
		rbwork = &cpu_buffer->irq_work;
	}

	if (full) {
		unsigned long flags;

		poll_wait(filp, &rbwork->full_waiters, poll_table);

		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		rbwork->full_waiters_pending = true;
		if (!cpu_buffer->shortest_full ||
		    cpu_buffer->shortest_full > full)
			cpu_buffer->shortest_full = full;
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	} else {
		poll_wait(filp, &rbwork->waiters, poll_table);
		rbwork->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they
 * only need to worry about interrupts, but reads can happen
 * on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

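/*
 * Worked example (illustrative): buffer pages are cache-line
 * aligned, so the two low bits of a ->next pointer are free for
 * flags. A next value of 0xffff888012345601 means the pointed-to
 * page at 0xffff888012345600 is the head page (RB_PAGE_HEAD), and
 * rb_list_head() masks the flags off before the pointer is
 * dereferenced.
 */
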
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static bool rb_head_page_replace(struct buffer_page *old,
				 struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			   struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return;
	}
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			       long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much, si_mem_available() may
	 * report that there's enough memory even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
					cpu_buffer->buffer->subbuf_order);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		bpage->order = cpu_buffer->buffer->subbuf_order;
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

1514
1515static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
9b94a8fb 1516 unsigned long nr_pages)
438ced17
VN
1517{
1518 LIST_HEAD(pages);
1519
1520 WARN_ON(!nr_pages);
1521
74e2afc6 1522 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
438ced17
VN
1523 return -ENOMEM;
1524
3adc54fa
SR
1525 /*
1526 * The ring buffer page list is a circular list that does not
1527 * start and end with a list head. All page list items point to
1528 * other pages.
1529 */
1530 cpu_buffer->pages = pages.next;
1531 list_del(&pages);
7a8e76a3 1532
438ced17
VN
1533 cpu_buffer->nr_pages = nr_pages;
1534
7a8e76a3
SR
1535 rb_check_pages(cpu_buffer);
1536
1537 return 0;
7a8e76a3
SR
1538}
1539
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;

	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	irq_work_sync(&cpu_buffer->irq_work.work);

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	free_page((unsigned long)cpu_buffer->free_page);

	kfree(cpu_buffer);
}

1627/**
d611851b 1628 * __ring_buffer_alloc - allocate a new ring_buffer
68814b58 1629 * @size: the size in bytes per cpu that is needed.
7a8e76a3 1630 * @flags: attributes to set for the ring buffer.
59e7cffe 1631 * @key: ring buffer reader_lock_key.
7a8e76a3
SR
1632 *
1633 * Currently the only flag that is available is the RB_FL_OVERWRITE
1634 * flag. This flag means that the buffer will overwrite old data
1635 * when the buffer wraps. If this flag is not set, the buffer will
1636 * drop data when the tail hits the head.
1637 */
13292494 1638struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1f8a6a10 1639 struct lock_class_key *key)
7a8e76a3 1640{
13292494 1641 struct trace_buffer *buffer;
9b94a8fb 1642 long nr_pages;
7a8e76a3 1643 int bsize;
9b94a8fb 1644 int cpu;
b32614c0 1645 int ret;
7a8e76a3
SR
1646
1647 /* keep it in its own cache line */
1648 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1649 GFP_KERNEL);
1650 if (!buffer)
1651 return NULL;
1652
b18cc3de 1653 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
9e01c1b7
RR
1654 goto fail_free_buffer;
1655
139f8400 1656 /* Default buffer page size - one system page */
f9b94daa 1657 buffer->subbuf_order = 0;
139f8400
TSV
1658 buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
1659
1660 /* Max payload is buffer page size - header (8bytes) */
1661 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
1662
1663 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
7a8e76a3 1664 buffer->flags = flags;
37886f6a 1665 buffer->clock = trace_clock_local;
1f8a6a10 1666 buffer->reader_lock_key = key;
7a8e76a3 1667
15693458 1668 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
f1dc6725 1669 init_waitqueue_head(&buffer->irq_work.waiters);
15693458 1670
7a8e76a3 1671 /* need at least two pages */
438ced17
VN
1672 if (nr_pages < 2)
1673 nr_pages = 2;
7a8e76a3 1674
7a8e76a3
SR
1675 buffer->cpus = nr_cpu_ids;
1676
1677 bsize = sizeof(void *) * nr_cpu_ids;
1678 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1679 GFP_KERNEL);
1680 if (!buffer->buffers)
9e01c1b7 1681 goto fail_free_cpumask;
7a8e76a3 1682
b32614c0
SAS
1683 cpu = raw_smp_processor_id();
1684 cpumask_set_cpu(cpu, buffer->cpumask);
1685 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1686 if (!buffer->buffers[cpu])
1687 goto fail_free_buffers;
7a8e76a3 1688
b32614c0
SAS
1689 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1690 if (ret < 0)
1691 goto fail_free_buffers;
554f786e 1692
7a8e76a3
SR
1693 mutex_init(&buffer->mutex);
1694
1695 return buffer;
1696
1697 fail_free_buffers:
1698 for_each_buffer_cpu(buffer, cpu) {
1699 if (buffer->buffers[cpu])
1700 rb_free_cpu_buffer(buffer->buffers[cpu]);
1701 }
1702 kfree(buffer->buffers);
1703
9e01c1b7
RR
1704 fail_free_cpumask:
1705 free_cpumask_var(buffer->cpumask);
1706
7a8e76a3
SR
1707 fail_free_buffer:
1708 kfree(buffer);
1709 return NULL;
1710}
1f8a6a10 1711EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
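/*
 * Example usage (an illustrative sketch, not part of this file): callers
 * normally go through the ring_buffer_alloc() wrapper, which supplies the
 * lock class key. The size is per CPU, is rounded up to whole sub-buffer
 * pages, and a minimum of two pages is always allocated:
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */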
7a8e76a3
SR
1712
1713/**
1714 * ring_buffer_free - free a ring buffer.
1715 * @buffer: the buffer to free.
1716 */
1717void
13292494 1718ring_buffer_free(struct trace_buffer *buffer)
7a8e76a3
SR
1719{
1720 int cpu;
1721
b32614c0 1722 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
554f786e 1723
675751bb
JB
1724 irq_work_sync(&buffer->irq_work.work);
1725
7a8e76a3
SR
1726 for_each_buffer_cpu(buffer, cpu)
1727 rb_free_cpu_buffer(buffer->buffers[cpu]);
1728
bd3f0221 1729 kfree(buffer->buffers);
9e01c1b7
RR
1730 free_cpumask_var(buffer->cpumask);
1731
7a8e76a3
SR
1732 kfree(buffer);
1733}
c4f50183 1734EXPORT_SYMBOL_GPL(ring_buffer_free);
7a8e76a3 1735
13292494 1736void ring_buffer_set_clock(struct trace_buffer *buffer,
37886f6a
SR
1737 u64 (*clock)(void))
1738{
1739 buffer->clock = clock;
1740}
1741
13292494 1742void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
00b41452
TZ
1743{
1744 buffer->time_stamp_abs = abs;
1745}
1746
13292494 1747bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
00b41452
TZ
1748{
1749 return buffer->time_stamp_abs;
1750}
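/*
 * Illustrative sketch (the caller is assumed, not taken from this file):
 * trace_clock_global is one existing clock that can be plugged in here,
 * and absolute timestamps can be requested alongside it:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 *	ring_buffer_set_time_stamp_abs(buffer, true);
 */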
1751
7a8e76a3
SR
1752static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1753
83f40318
VN
1754static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1755{
1756 return local_read(&bpage->entries) & RB_WRITE_MASK;
1757}
1758
1759static inline unsigned long rb_page_write(struct buffer_page *bpage)
1760{
1761 return local_read(&bpage->write) & RB_WRITE_MASK;
1762}
1763
bc92b956 1764static bool
9b94a8fb 1765rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
7a8e76a3 1766{
83f40318
VN
1767 struct list_head *tail_page, *to_remove, *next_page;
1768 struct buffer_page *to_remove_page, *tmp_iter_page;
1769 struct buffer_page *last_page, *first_page;
9b94a8fb 1770 unsigned long nr_removed;
83f40318
VN
1771 unsigned long head_bit;
1772 int page_entries;
1773
1774 head_bit = 0;
7a8e76a3 1775
5389f6fa 1776 raw_spin_lock_irq(&cpu_buffer->reader_lock);
83f40318
VN
1777 atomic_inc(&cpu_buffer->record_disabled);
1778 /*
1779 * We don't race with the readers since we have acquired the reader
1780 * lock. We also don't race with writers after disabling recording.
1781 * This makes it easy to figure out the first and the last page to be
1782 * removed from the list. We unlink all the pages in between including
1783 * the first and last pages. This is done in a busy loop so that we
1784 * lose the least number of traces.
1785 * The pages are freed after we restart recording and unlock readers.
1786 */
1787 tail_page = &cpu_buffer->tail_page->list;
77ae365e 1788
83f40318
VN
1789 /*
1790 * The tail page might be on the reader page; in that case we
1791 * remove the next page from the ring buffer instead.
1792 */
1793 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1794 tail_page = rb_list_head(tail_page->next);
1795 to_remove = tail_page;
1796
1797 /* start of pages to remove */
1798 first_page = list_entry(rb_list_head(to_remove->next),
1799 struct buffer_page, list);
1800
1801 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1802 to_remove = rb_list_head(to_remove)->next;
1803 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
7a8e76a3 1804 }
2d093282
ZY
1805 /* Read iterators need to reset themselves when some pages removed */
1806 cpu_buffer->pages_removed += nr_removed;
7a8e76a3 1807
83f40318 1808 next_page = rb_list_head(to_remove)->next;
7a8e76a3 1809
83f40318
VN
1810 /*
1811 * Now we remove all pages between tail_page and next_page.
1812 * Make sure that we have head_bit value preserved for the
1813 * next page
1814 */
1815 tail_page->next = (struct list_head *)((unsigned long)next_page |
1816 head_bit);
1817 next_page = rb_list_head(next_page);
1818 next_page->prev = tail_page;
1819
1820 /* make sure pages points to a valid page in the ring buffer */
1821 cpu_buffer->pages = next_page;
1822
1823 /* update head page */
1824 if (head_bit)
1825 cpu_buffer->head_page = list_entry(next_page,
1826 struct buffer_page, list);
1827
83f40318
VN
1828 /* pages are removed, resume tracing and then free the pages */
1829 atomic_dec(&cpu_buffer->record_disabled);
5389f6fa 1830 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
83f40318
VN
1831
1832 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1833
1834 /* last buffer page to remove */
1835 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1836 list);
1837 tmp_iter_page = first_page;
1838
1839 do {
83f36555
VN
1840 cond_resched();
1841
83f40318 1842 to_remove_page = tmp_iter_page;
6689bed3 1843 rb_inc_page(&tmp_iter_page);
83f40318
VN
1844
1845 /* update the counters */
1846 page_entries = rb_page_entries(to_remove_page);
1847 if (page_entries) {
1848 /*
1849 * If something was added to this page, it was full
1850 * since it is not the tail page. So we deduct the
1851 * bytes consumed in the ring buffer from here.
48fdc72f 1852 * Increment overrun to account for the lost events.
83f40318 1853 */
48fdc72f 1854 local_add(page_entries, &cpu_buffer->overrun);
45d99ea4 1855 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
31029a8b 1856 local_inc(&cpu_buffer->pages_lost);
83f40318
VN
1857 }
1858
1859 /*
1860 * We have already removed references to this list item, just
1861 * free up the buffer_page and its page
1862 */
1863 free_buffer_page(to_remove_page);
1864 nr_removed--;
1865
1866 } while (to_remove_page != last_page);
1867
1868 RB_WARN_ON(cpu_buffer, nr_removed);
5040b4b7
VN
1869
1870 return nr_removed == 0;
7a8e76a3
SR
1871}
1872
bc92b956 1873static bool
5040b4b7 1874rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1875{
5040b4b7 1876 struct list_head *pages = &cpu_buffer->new_pages;
88ca6a71 1877 unsigned long flags;
bc92b956
UB
1878 bool success;
1879 int retries;
7a8e76a3 1880
88ca6a71
SR
1881 /* Can be called at early boot up, where interrupts must not be enabled */
1882 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5040b4b7
VN
1883 /*
1884 * We are holding the reader lock, so the reader page won't be swapped
1885 * in the ring buffer. Now we are racing with the writer trying to
1886 * move head page and the tail page.
1887 * We are going to adapt the reader page update process where:
1888 * 1. We first splice the start and end of list of new pages between
1889 * the head page and its previous page.
1890 * 2. We cmpxchg the prev_page->next to point from head page to the
1891 * start of new pages list.
1892 * 3. Finally, we update the head->prev to the end of new list.
1893 *
1894 * We will try this process 10 times, to make sure that we don't keep
1895 * spinning.
1896 */
1897 retries = 10;
bc92b956 1898 success = false;
5040b4b7 1899 while (retries--) {
bdf4fb62 1900 struct list_head *head_page, *prev_page;
5040b4b7
VN
1901 struct list_head *last_page, *first_page;
1902 struct list_head *head_page_with_bit;
625ed527 1903 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
77ae365e 1904
625ed527 1905 if (!hpage)
54f7be5b 1906 break;
625ed527 1907 head_page = &hpage->list;
5040b4b7
VN
1908 prev_page = head_page->prev;
1909
1910 first_page = pages->next;
1911 last_page = pages->prev;
1912
1913 head_page_with_bit = (struct list_head *)
1914 ((unsigned long)head_page | RB_PAGE_HEAD);
1915
1916 last_page->next = head_page_with_bit;
1917 first_page->prev = prev_page;
1918
bdf4fb62
UB
1919 /* caution: head_page_with_bit gets updated on cmpxchg failure */
1920 if (try_cmpxchg(&prev_page->next,
1921 &head_page_with_bit, first_page)) {
5040b4b7
VN
1922 /*
1923 * yay, we replaced the page pointer with our new list,
1924 * now we just have to update the head page's prev
1925 * pointer to point to the end of the new list
1926 */
1927 head_page->prev = last_page;
bc92b956 1928 success = true;
5040b4b7
VN
1929 break;
1930 }
7a8e76a3 1931 }
7a8e76a3 1932
5040b4b7
VN
1933 if (success)
1934 INIT_LIST_HEAD(pages);
1935 /*
1936 * If we weren't successful in adding the new pages, warn and
1937 * stop tracing.
1938 */
1939 RB_WARN_ON(cpu_buffer, !success);
88ca6a71 1940 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5040b4b7
VN
1941
1942 /* free pages if they weren't inserted */
1943 if (!success) {
1944 struct buffer_page *bpage, *tmp;
1945 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1946 list) {
1947 list_del_init(&bpage->list);
1948 free_buffer_page(bpage);
1949 }
1950 }
1951 return success;
7a8e76a3
SR
1952}
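/*
 * Sketch of the pointer tagging that the cmpxchg above relies on
 * (illustrative, not part of this file): the HEAD flag lives in the low
 * bits of the ->next pointer itself, so any link read from the list must
 * be masked with rb_list_head() before it is dereferenced:
 *
 *	struct list_head *next = page->list.next;
 *
 *	if ((unsigned long)next & RB_PAGE_HEAD)
 *		;	// the page after 'page' is the head page
 *	next = rb_list_head(next);	// strip the flag bits before use
 */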
1953
83f40318 1954static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
438ced17 1955{
bc92b956 1956 bool success;
5040b4b7 1957
438ced17 1958 if (cpu_buffer->nr_pages_to_update > 0)
5040b4b7 1959 success = rb_insert_pages(cpu_buffer);
438ced17 1960 else
5040b4b7
VN
1961 success = rb_remove_pages(cpu_buffer,
1962 -cpu_buffer->nr_pages_to_update);
83f40318 1963
5040b4b7
VN
1964 if (success)
1965 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
83f40318
VN
1966}
1967
1968static void update_pages_handler(struct work_struct *work)
1969{
1970 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1971 struct ring_buffer_per_cpu, update_pages_work);
1972 rb_update_pages(cpu_buffer);
05fdd70d 1973 complete(&cpu_buffer->update_done);
438ced17
VN
1974}
1975
7a8e76a3
SR
1976/**
1977 * ring_buffer_resize - resize the ring buffer
1978 * @buffer: the buffer to resize.
1979 * @size: the new size.
d611851b 1980 * @cpu_id: the cpu buffer to resize
7a8e76a3 1981 *
139f8400 1982 * Minimum size is 2 * buffer->subbuf_size.
7a8e76a3 1983 *
83f40318 1984 * Returns 0 on success and < 0 on failure.
7a8e76a3 1985 */
13292494 1986int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
438ced17 1987 int cpu_id)
7a8e76a3
SR
1988{
1989 struct ring_buffer_per_cpu *cpu_buffer;
9b94a8fb 1990 unsigned long nr_pages;
0a1754b2 1991 int cpu, err;
7a8e76a3 1992
ee51a1de
IM
1993 /*
1994 * Always succeed at resizing a non-existent buffer:
1995 */
1996 if (!buffer)
0a1754b2 1997 return 0;
ee51a1de 1998
6a31e1f1
SR
1999 /* Make sure the requested buffer exists */
2000 if (cpu_id != RING_BUFFER_ALL_CPUS &&
2001 !cpumask_test_cpu(cpu_id, buffer->cpumask))
0a1754b2 2002 return 0;
6a31e1f1 2003
139f8400 2004 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
7a8e76a3
SR
2005
2006 /* we need a minimum of two pages */
59643d15
SRRH
2007 if (nr_pages < 2)
2008 nr_pages = 2;
7a8e76a3 2009
83f40318 2010 /* prevent another thread from changing buffer sizes */
7a8e76a3 2011 mutex_lock(&buffer->mutex);
8a96c028 2012 atomic_inc(&buffer->resizing);
07b8b10e 2013
438ced17 2014 if (cpu_id == RING_BUFFER_ALL_CPUS) {
07b8b10e
SRV
2015 /*
2016 * Don't succeed if resizing is disabled, as a reader might be
2017 * manipulating the ring buffer and is expecting a sane state while
2018 * this is true.
2019 */
2020 for_each_buffer_cpu(buffer, cpu) {
2021 cpu_buffer = buffer->buffers[cpu];
2022 if (atomic_read(&cpu_buffer->resize_disabled)) {
2023 err = -EBUSY;
2024 goto out_err_unlock;
2025 }
2026 }
2027
438ced17 2028 /* calculate the pages to update */
7a8e76a3
SR
2029 for_each_buffer_cpu(buffer, cpu) {
2030 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 2031
438ced17
VN
2032 cpu_buffer->nr_pages_to_update = nr_pages -
2033 cpu_buffer->nr_pages;
438ced17
VN
2034 /*
2035 * Nothing more to do when removing pages, or if there is no update.
2036 */
2037 if (cpu_buffer->nr_pages_to_update <= 0)
2038 continue;
d7ec4bfe 2039 /*
438ced17
VN
2040 * to add pages, make sure all new pages can be
2041 * allocated without receiving ENOMEM
d7ec4bfe 2042 */
438ced17 2043 INIT_LIST_HEAD(&cpu_buffer->new_pages);
74e2afc6
QH
2044 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2045 &cpu_buffer->new_pages)) {
438ced17 2046 /* not enough memory for new pages */
83f40318
VN
2047 err = -ENOMEM;
2048 goto out_err;
2049 }
f6bd2c92
ZY
2050
2051 cond_resched();
83f40318
VN
2052 }
2053
99c37d1a 2054 cpus_read_lock();
83f40318
VN
2055 /*
2056 * Fire off all the required work handlers
05fdd70d 2057 * We can't schedule on offline CPUs, but it's not necessary
83f40318
VN
2058 * since we can change their buffer sizes without any race.
2059 */
2060 for_each_buffer_cpu(buffer, cpu) {
2061 cpu_buffer = buffer->buffers[cpu];
05fdd70d 2062 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
2063 continue;
2064
021c5b34
CM
2065 /* Can't run something on an offline CPU. */
2066 if (!cpu_online(cpu)) {
f5eb5588
SRRH
2067 rb_update_pages(cpu_buffer);
2068 cpu_buffer->nr_pages_to_update = 0;
2069 } else {
88ca6a71
SR
2070 /* Run directly if possible. */
2071 migrate_disable();
2072 if (cpu != smp_processor_id()) {
2073 migrate_enable();
2074 schedule_work_on(cpu,
2075 &cpu_buffer->update_pages_work);
2076 } else {
2077 update_pages_handler(&cpu_buffer->update_pages_work);
2078 migrate_enable();
2079 }
f5eb5588 2080 }
7a8e76a3 2081 }
7a8e76a3 2082
438ced17
VN
2083 /* wait for all the updates to complete */
2084 for_each_buffer_cpu(buffer, cpu) {
2085 cpu_buffer = buffer->buffers[cpu];
05fdd70d 2086 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
2087 continue;
2088
05fdd70d
VN
2089 if (cpu_online(cpu))
2090 wait_for_completion(&cpu_buffer->update_done);
83f40318 2091 cpu_buffer->nr_pages_to_update = 0;
438ced17 2092 }
83f40318 2093
99c37d1a 2094 cpus_read_unlock();
438ced17
VN
2095 } else {
2096 cpu_buffer = buffer->buffers[cpu_id];
83f40318 2097
438ced17
VN
2098 if (nr_pages == cpu_buffer->nr_pages)
2099 goto out;
7a8e76a3 2100
07b8b10e
SRV
2101 /*
2102 * Don't succeed if resizing is disabled, as a reader might be
2103 * manipulating the ring buffer and is expecting a sane state while
2104 * this is true.
2105 */
2106 if (atomic_read(&cpu_buffer->resize_disabled)) {
2107 err = -EBUSY;
2108 goto out_err_unlock;
2109 }
2110
438ced17
VN
2111 cpu_buffer->nr_pages_to_update = nr_pages -
2112 cpu_buffer->nr_pages;
2113
2114 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2115 if (cpu_buffer->nr_pages_to_update > 0 &&
74e2afc6
QH
2116 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2117 &cpu_buffer->new_pages)) {
83f40318
VN
2118 err = -ENOMEM;
2119 goto out_err;
2120 }
438ced17 2121
99c37d1a 2122 cpus_read_lock();
83f40318 2123
021c5b34
CM
2124 /* Can't run something on an offline CPU. */
2125 if (!cpu_online(cpu_id))
f5eb5588
SRRH
2126 rb_update_pages(cpu_buffer);
2127 else {
88ca6a71
SR
2128 /* Run directly if possible. */
2129 migrate_disable();
2130 if (cpu_id == smp_processor_id()) {
2131 rb_update_pages(cpu_buffer);
2132 migrate_enable();
2133 } else {
2134 migrate_enable();
2135 schedule_work_on(cpu_id,
2136 &cpu_buffer->update_pages_work);
2137 wait_for_completion(&cpu_buffer->update_done);
2138 }
f5eb5588 2139 }
83f40318 2140
83f40318 2141 cpu_buffer->nr_pages_to_update = 0;
99c37d1a 2142 cpus_read_unlock();
438ced17 2143 }
7a8e76a3
SR
2144
2145 out:
659f451f
SR
2146 /*
2147 * The ring buffer resize can happen with the ring buffer
2148 * enabled, so that the update disturbs the tracing as little
2149 * as possible. But if the buffer is disabled, we do not need
2150 * to worry about that, and we can take the time to verify
2151 * that the buffer is not corrupt.
2152 */
2153 if (atomic_read(&buffer->record_disabled)) {
2154 atomic_inc(&buffer->record_disabled);
2155 /*
2156 * Even though the buffer was disabled, we must make sure
2157 * that it is truly disabled before calling rb_check_pages.
2158 * There could have been a race between checking
2159 * record_disable and incrementing it.
2160 */
74401729 2161 synchronize_rcu();
659f451f
SR
2162 for_each_buffer_cpu(buffer, cpu) {
2163 cpu_buffer = buffer->buffers[cpu];
2164 rb_check_pages(cpu_buffer);
2165 }
2166 atomic_dec(&buffer->record_disabled);
2167 }
2168
8a96c028 2169 atomic_dec(&buffer->resizing);
7a8e76a3 2170 mutex_unlock(&buffer->mutex);
0a1754b2 2171 return 0;
7a8e76a3 2172
83f40318 2173 out_err:
438ced17
VN
2174 for_each_buffer_cpu(buffer, cpu) {
2175 struct buffer_page *bpage, *tmp;
83f40318 2176
438ced17 2177 cpu_buffer = buffer->buffers[cpu];
438ced17 2178 cpu_buffer->nr_pages_to_update = 0;
83f40318 2179
438ced17
VN
2180 if (list_empty(&cpu_buffer->new_pages))
2181 continue;
83f40318 2182
438ced17
VN
2183 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2184 list) {
2185 list_del_init(&bpage->list);
2186 free_buffer_page(bpage);
2187 }
7a8e76a3 2188 }
07b8b10e 2189 out_err_unlock:
8a96c028 2190 atomic_dec(&buffer->resizing);
641d2f63 2191 mutex_unlock(&buffer->mutex);
83f40318 2192 return err;
7a8e76a3 2193}
c4f50183 2194EXPORT_SYMBOL_GPL(ring_buffer_resize);
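/*
 * Example call (a sketch; the size is an assumption): grow every per-CPU
 * buffer to 64 KB. The size is given in bytes and is rounded up to whole
 * sub-buffer pages internally:
 *
 *	if (ring_buffer_resize(buffer, 64 * 1024, RING_BUFFER_ALL_CPUS) < 0)
 *		pr_warn("ring buffer resize failed\n");
 */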
7a8e76a3 2195
13292494 2196void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
750912fa
DS
2197{
2198 mutex_lock(&buffer->mutex);
2199 if (val)
2200 buffer->flags |= RB_FL_OVERWRITE;
2201 else
2202 buffer->flags &= ~RB_FL_OVERWRITE;
2203 mutex_unlock(&buffer->mutex);
2204}
2205EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2206
2289d567 2207static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
7a8e76a3 2208{
044fa782 2209 return bpage->page->data + index;
7a8e76a3
SR
2210}
2211
2289d567 2212static __always_inline struct ring_buffer_event *
d769041f 2213rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 2214{
6f807acd
SR
2215 return __rb_page_index(cpu_buffer->reader_page,
2216 cpu_buffer->reader_page->read);
2217}
2218
785888c5
SRV
2219static struct ring_buffer_event *
2220rb_iter_head_event(struct ring_buffer_iter *iter)
bf41a158 2221{
785888c5
SRV
2222 struct ring_buffer_event *event;
2223 struct buffer_page *iter_head_page = iter->head_page;
2224 unsigned long commit;
2225 unsigned length;
2226
153368ce
SRV
2227 if (iter->head != iter->next_event)
2228 return iter->event;
2229
785888c5
SRV
2230 /*
2231 * When the writer goes across pages, it issues a cmpxchg which
2232 * is a mb(), which will synchronize with the rmb here.
2233 * (see rb_tail_page_update() and __rb_reserve_next())
2234 */
2235 commit = rb_page_commit(iter_head_page);
2236 smp_rmb();
95a404bd
SRG
2237
2238 /* An event needs to be at least 8 bytes in size */
2239 if (iter->head > commit - 8)
2240 goto reset;
2241
785888c5
SRV
2242 event = __rb_page_index(iter_head_page, iter->head);
2243 length = rb_event_length(event);
2244
2245 /*
2246 * READ_ONCE() doesn't work on functions and we don't want the
2247 * compiler doing any crazy optimizations with length.
2248 */
2249 barrier();
2250
139f8400 2251 if ((iter->head + length) > commit || length > iter->event_size)
785888c5
SRV
2252 /* Writer corrupted the read? */
2253 goto reset;
2254
2255 memcpy(iter->event, event, length);
2256 /*
2257 * If the page stamp is still the same after this rmb() then the
2258 * event was safely copied without the writer entering the page.
2259 */
2260 smp_rmb();
2261
2262 /* Make sure the page didn't change since we read this */
2263 if (iter->page_stamp != iter_head_page->page->time_stamp ||
2264 commit > rb_page_commit(iter_head_page))
2265 goto reset;
2266
2267 iter->next_event = iter->head + length;
2268 return iter->event;
2269 reset:
2270 /* Reset to the beginning */
2271 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2272 iter->head = 0;
2273 iter->next_event = 0;
c9b7a4a7 2274 iter->missed_events = 1;
785888c5 2275 return NULL;
bf41a158
SR
2276}
2277
25985edc 2278/* Size is determined by what has been committed */
2289d567 2279static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
bf41a158
SR
2280{
2281 return rb_page_commit(bpage);
2282}
2283
2289d567 2284static __always_inline unsigned
bf41a158
SR
2285rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2286{
2287 return rb_page_commit(cpu_buffer->commit_page);
2288}
2289
2289d567 2290static __always_inline unsigned
3cb30911 2291rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
bf41a158
SR
2292{
2293 unsigned long addr = (unsigned long)event;
2294
3cb30911
SRG
2295 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
2296
2297 return addr - BUF_PAGE_HDR_SIZE;
bf41a158
SR
2298}
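/*
 * Worked example of the masking above (assuming a 4096-byte sub-buffer,
 * subbuf_order 0, and a 16-byte buffer page header): an event at kernel
 * address 0x...9050 masks to offset 0x50 within its page, and
 * 0x50 - 0x10 = 0x40 is the event's index into the sub-buffer's data.
 */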
2299
34a148bf 2300static void rb_inc_iter(struct ring_buffer_iter *iter)
d769041f
SR
2301{
2302 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2303
2304 /*
2305 * The iterator could be on the reader page (it starts there).
2306 * But the head could have moved, since the reader was
2307 * found. Check for this case and assign the iterator
2308 * to the head page instead of next.
2309 */
2310 if (iter->head_page == cpu_buffer->reader_page)
77ae365e 2311 iter->head_page = rb_set_head_page(cpu_buffer);
d769041f 2312 else
6689bed3 2313 rb_inc_page(&iter->head_page);
d769041f 2314
28e3fc56 2315 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
7a8e76a3 2316 iter->head = 0;
785888c5 2317 iter->next_event = 0;
7a8e76a3
SR
2318}
2319
77ae365e
SR
2320/*
2321 * rb_handle_head_page - writer hit the head page
2322 *
2323 * Returns: +1 to retry page
2324 * 0 to continue
2325 * -1 on error
2326 */
2327static int
2328rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2329 struct buffer_page *tail_page,
2330 struct buffer_page *next_page)
2331{
2332 struct buffer_page *new_head;
2333 int entries;
2334 int type;
2335 int ret;
2336
2337 entries = rb_page_entries(next_page);
2338
2339 /*
2340 * The hard part is here. We need to move the head
2341 * forward, and protect against both readers on
2342 * other CPUs and writers coming in via interrupts.
2343 */
2344 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2345 RB_PAGE_HEAD);
2346
2347 /*
2348 * type can be one of four:
2349 * NORMAL - an interrupt already moved it for us
2350 * HEAD - we are the first to get here.
2351 * UPDATE - we are the interrupt interrupting
2352 * a current move.
2353 * MOVED - a reader on another CPU moved the next
2354 * pointer to its reader page. Give up
2355 * and try again.
2356 */
2357
2358 switch (type) {
2359 case RB_PAGE_HEAD:
2360 /*
2361 * We changed the head to UPDATE, thus
2362 * it is our responsibility to update
2363 * the counters.
2364 */
2365 local_add(entries, &cpu_buffer->overrun);
45d99ea4 2366 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
31029a8b 2367 local_inc(&cpu_buffer->pages_lost);
77ae365e
SR
2368
2369 /*
2370 * The entries will be zeroed out when we move the
2371 * tail page.
2372 */
2373
2374 /* still more to do */
2375 break;
2376
2377 case RB_PAGE_UPDATE:
2378 /*
2379 * This is an interrupt that interrupted the
2380 * previous update. Still more to do.
2381 */
2382 break;
2383 case RB_PAGE_NORMAL:
2384 /*
2385 * An interrupt came in before the update
2386 * and processed this for us.
2387 * Nothing left to do.
2388 */
2389 return 1;
2390 case RB_PAGE_MOVED:
2391 /*
2392 * The reader is on another CPU and just did
2393 * a swap with our next_page.
2394 * Try again.
2395 */
2396 return 1;
2397 default:
2398 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2399 return -1;
2400 }
2401
2402 /*
2403 * Now that we are here, the old head pointer is
2404 * set to UPDATE. This will keep the reader from
2405 * swapping the head page with the reader page.
2406 * The reader (on another CPU) will spin till
2407 * we are finished.
2408 *
2409 * We just need to protect against interrupts
2410 * doing the job. We will set the next pointer
2411 * to HEAD. After that, we set the old pointer
2412 * to NORMAL, but only if it was HEAD before.
2413 * Otherwise we are an interrupt, and only
2414 * want the outermost commit to reset it.
2415 */
2416 new_head = next_page;
6689bed3 2417 rb_inc_page(&new_head);
77ae365e
SR
2418
2419 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2420 RB_PAGE_NORMAL);
2421
2422 /*
2423 * Valid returns are:
2424 * HEAD - an interrupt came in and already set it.
2425 * NORMAL - One of two things:
2426 * 1) We really set it.
2427 * 2) A bunch of interrupts came in and moved
2428 * the page forward again.
2429 */
2430 switch (ret) {
2431 case RB_PAGE_HEAD:
2432 case RB_PAGE_NORMAL:
2433 /* OK */
2434 break;
2435 default:
2436 RB_WARN_ON(cpu_buffer, 1);
2437 return -1;
2438 }
2439
2440 /*
2441 * It is possible that an interrupt came in,
2442 * set the head up, then more interrupts came in
2443 * and moved it again. When we get back here,
2444 * the page would have been set to NORMAL but we
2445 * just set it back to HEAD.
2446 *
2447 * How do you detect this? Well, if that happened
2448 * the tail page would have moved.
2449 */
2450 if (ret == RB_PAGE_NORMAL) {
8573636e
SRRH
2451 struct buffer_page *buffer_tail_page;
2452
2453 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
77ae365e
SR
2454 /*
2455 * If the tail had moved past next, then we need
2456 * to reset the pointer.
2457 */
8573636e
SRRH
2458 if (buffer_tail_page != tail_page &&
2459 buffer_tail_page != next_page)
77ae365e
SR
2460 rb_head_page_set_normal(cpu_buffer, new_head,
2461 next_page,
2462 RB_PAGE_HEAD);
2463 }
2464
2465 /*
2466 * If this was the outer most commit (the one that
2467 * changed the original pointer from HEAD to UPDATE),
2468 * then it is up to us to reset it to NORMAL.
2469 */
2470 if (type == RB_PAGE_HEAD) {
2471 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2472 tail_page,
2473 RB_PAGE_UPDATE);
2474 if (RB_WARN_ON(cpu_buffer,
2475 ret != RB_PAGE_UPDATE))
2476 return -1;
2477 }
2478
2479 return 0;
2480}
2481
c7b09308
SR
2482static inline void
2483rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 2484 unsigned long tail, struct rb_event_info *info)
c7b09308 2485{
139f8400 2486 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
fcc742ea 2487 struct buffer_page *tail_page = info->tail_page;
c7b09308 2488 struct ring_buffer_event *event;
fcc742ea 2489 unsigned long length = info->length;
c7b09308
SR
2490
2491 /*
2492 * Only the event that crossed the page boundary
2493 * must fill the old tail_page with padding.
2494 */
139f8400 2495 if (tail >= bsize) {
b3230c8b
SR
2496 /*
2497 * If the page was filled, then we still need
2498 * to update the real_end. Reset it to zero
2499 * and the reader will ignore it.
2500 */
139f8400 2501 if (tail == bsize)
b3230c8b
SR
2502 tail_page->real_end = 0;
2503
c7b09308
SR
2504 local_sub(length, &tail_page->write);
2505 return;
2506 }
2507
2508 event = __rb_page_index(tail_page, tail);
2509
ff0ff84a
SR
2510 /*
2511 * Save the original length to the meta data.
2512 * This will be used by the reader to add the lost
2513 * event counter.
2514 */
2515 tail_page->real_end = tail;
2516
c7b09308
SR
2517 /*
2518 * If this event is bigger than the minimum size, then
2519 * we need to be careful that we don't subtract the
2520 * write counter enough to allow another writer to slip
2521 * in on this page.
2522 * We put in a discarded commit instead, to make sure
45d99ea4
ZY
2523 * that this space is not used again, and this space will
2524 * not be accounted into 'entries_bytes'.
c7b09308
SR
2525 *
2526 * If we are less than the minimum size, we don't need to
2527 * worry about it.
2528 */
139f8400 2529 if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
c7b09308
SR
2530 /* No room for any events */
2531
2532 /* Mark the rest of the page with padding */
2533 rb_event_set_padding(event);
2534
a0fcaaed
SRG
2535 /* Make sure the padding is visible before the write update */
2536 smp_wmb();
2537
c7b09308
SR
2538 /* Set the write back to the previous setting */
2539 local_sub(length, &tail_page->write);
2540 return;
2541 }
2542
2543 /* Put in a discarded event */
139f8400 2544 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
c7b09308
SR
2545 event->type_len = RINGBUF_TYPE_PADDING;
2546 /* time delta must be non zero */
2547 event->time_delta = 1;
c7b09308 2548
45d99ea4 2549 /* account for padding bytes */
139f8400 2550 local_add(bsize - tail, &cpu_buffer->entries_bytes);
45d99ea4 2551
a0fcaaed
SRG
2552 /* Make sure the padding is visible before the tail_page->write update */
2553 smp_wmb();
2554
c7b09308 2555 /* Set write to end of buffer */
139f8400 2556 length = (tail + length) - bsize;
c7b09308
SR
2557 local_sub(length, &tail_page->write);
2558}
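/*
 * Worked example of the above (assuming a 4080-byte usable sub-buffer):
 * a 32-byte event reserved at tail 4072 crosses the end of the page.
 * The 8 remaining bytes become one discarded padding event with
 * array[0] = 8 - RB_EVNT_HDR_SIZE = 4, and the write index is backed up
 * by (4072 + 32) - 4080 = 24 bytes so it lands exactly on the boundary.
 */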
6634ff26 2559
4239c38f
SRRH
2560static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2561
747e94ae
SR
2562/*
2563 * This is the slow path, force gcc not to inline it.
2564 */
2565static noinline struct ring_buffer_event *
6634ff26 2566rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 2567 unsigned long tail, struct rb_event_info *info)
7a8e76a3 2568{
fcc742ea 2569 struct buffer_page *tail_page = info->tail_page;
5a50e33c 2570 struct buffer_page *commit_page = cpu_buffer->commit_page;
13292494 2571 struct trace_buffer *buffer = cpu_buffer->buffer;
77ae365e
SR
2572 struct buffer_page *next_page;
2573 int ret;
aa20ae84
SR
2574
2575 next_page = tail_page;
2576
6689bed3 2577 rb_inc_page(&next_page);
aa20ae84 2578
aa20ae84
SR
2579 /*
2580 * If for some reason, we had an interrupt storm that made
2581 * it all the way around the buffer, bail, and warn
2582 * about it.
2583 */
2584 if (unlikely(next_page == commit_page)) {
77ae365e 2585 local_inc(&cpu_buffer->commit_overrun);
aa20ae84
SR
2586 goto out_reset;
2587 }
2588
77ae365e
SR
2589 /*
2590 * This is where the fun begins!
2591 *
2592 * We are fighting against races between a reader that
2593 * could be on another CPU trying to swap its reader
2594 * page with the buffer head.
2595 *
2596 * We are also fighting against interrupts coming in and
2597 * moving the head or tail on us as well.
2598 *
2599 * If the next page is the head page then we have filled
2600 * the buffer, unless the commit page is still on the
2601 * reader page.
2602 */
6689bed3 2603 if (rb_is_head_page(next_page, &tail_page->list)) {
aa20ae84 2604
77ae365e
SR
2605 /*
2606 * If the commit is not on the reader page, then
2607 * move the header page.
2608 */
2609 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2610 /*
2611 * If we are not in overwrite mode,
2612 * this is easy, just stop here.
2613 */
884bfe89
SP
2614 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2615 local_inc(&cpu_buffer->dropped_events);
77ae365e 2616 goto out_reset;
884bfe89 2617 }
77ae365e
SR
2618
2619 ret = rb_handle_head_page(cpu_buffer,
2620 tail_page,
2621 next_page);
2622 if (ret < 0)
2623 goto out_reset;
2624 if (ret)
2625 goto out_again;
2626 } else {
2627 /*
2628 * We need to be careful here too. The
2629 * commit page could still be on the reader
2630 * page. We could have a small buffer, and
2631 * have filled up the buffer with events
2632 * from interrupts and such, and wrapped.
2633 *
c6358bac 2634 * Note, if the tail page is also on the
77ae365e
SR
2635 * reader_page, we let it move out.
2636 */
2637 if (unlikely((cpu_buffer->commit_page !=
2638 cpu_buffer->tail_page) &&
2639 (cpu_buffer->commit_page ==
2640 cpu_buffer->reader_page))) {
2641 local_inc(&cpu_buffer->commit_overrun);
2642 goto out_reset;
2643 }
aa20ae84
SR
2644 }
2645 }
2646
70004986 2647 rb_tail_page_update(cpu_buffer, tail_page, next_page);
aa20ae84 2648
77ae365e 2649 out_again:
aa20ae84 2650
fcc742ea 2651 rb_reset_tail(cpu_buffer, tail, info);
aa20ae84 2652
4239c38f
SRRH
2653 /* Commit what we have for now. */
2654 rb_end_commit(cpu_buffer);
2655 /* rb_end_commit() decs committing */
2656 local_inc(&cpu_buffer->committing);
2657
aa20ae84
SR
2658 /* fail and let the caller try again */
2659 return ERR_PTR(-EAGAIN);
2660
45141d46 2661 out_reset:
6f3b3440 2662 /* reset write */
fcc742ea 2663 rb_reset_tail(cpu_buffer, tail, info);
6f3b3440 2664
bf41a158 2665 return NULL;
7a8e76a3
SR
2666}
2667
74e87937
SRV
2668/* Slow path */
2669static struct ring_buffer_event *
3cb30911
SRG
2670rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2671 struct ring_buffer_event *event, u64 delta, bool abs)
9826b273 2672{
dc4e2801
TZ
2673 if (abs)
2674 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2675 else
2676 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
9826b273 2677
dc4e2801 2678 /* Not the first event on the page, or not delta? */
3cb30911 2679 if (abs || rb_event_index(cpu_buffer, event)) {
d90fd774
SRRH
2680 event->time_delta = delta & TS_MASK;
2681 event->array[0] = delta >> TS_SHIFT;
2682 } else {
2683 /* nope, just zero it */
2684 event->time_delta = 0;
2685 event->array[0] = 0;
2686 }
a4543a2f 2687
d90fd774
SRRH
2688 return skip_time_extend(event);
2689}
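/*
 * Worked example of the delta split above (TS_SHIFT is 27): a delta of
 * 0x12345678 does not fit in the 27-bit time_delta field, so the extend
 * event stores:
 *
 *	event->time_delta = 0x12345678 & TS_MASK;	// 0x02345678
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;	// 0x2
 *
 * and the reader reassembles (array[0] << TS_SHIFT) | time_delta.
 */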
a4543a2f 2690
58fbc3c6
SRV
2691#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2692static inline bool sched_clock_stable(void)
2693{
2694 return true;
2695}
2696#endif
2697
74e87937 2698static void
58fbc3c6
SRV
2699rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2700 struct rb_event_info *info)
2701{
2702 u64 write_stamp;
2703
29ce2451 2704 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
58fbc3c6
SRV
2705 (unsigned long long)info->delta,
2706 (unsigned long long)info->ts,
2707 (unsigned long long)info->before,
2708 (unsigned long long)info->after,
c84897c0 2709 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
58fbc3c6
SRV
2710 sched_clock_stable() ? "" :
2711 "If you just came from a suspend/resume,\n"
2712 "please switch to the trace global clock:\n"
2455f0e1 2713 " echo global > /sys/kernel/tracing/trace_clock\n"
58fbc3c6
SRV
2714 "or add trace_clock=global to the kernel command line\n");
2715}
2716
74e87937
SRV
2717static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2718 struct ring_buffer_event **event,
2719 struct rb_event_info *info,
2720 u64 *delta,
2721 unsigned int *length)
2722{
2723 bool abs = info->add_timestamp &
2724 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2725
29ce2451 2726 if (unlikely(info->delta > (1ULL << 59))) {
6695da58
SRG
2727 /*
2728 * Some timers can use more than 59 bits, and when a timestamp
2729 * is added to the buffer, it will lose those bits.
2730 */
2731 if (abs && (info->ts & TS_MSB)) {
2732 info->delta &= ABS_TS_MASK;
2733
29ce2451 2734 /* did the clock go backwards */
6695da58 2735 } else if (info->before == info->after && info->before > info->ts) {
29ce2451
SRV
2736 /* not interrupted */
2737 static int once;
2738
2739 /*
2740 * This is possible with a recalibration of the TSC.
2741 * Do not produce a call stack, but just report it.
2742 */
2743 if (!once) {
2744 once++;
2745 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2746 info->before, info->ts);
2747 }
2748 } else
2749 rb_check_timestamp(cpu_buffer, info);
2750 if (!abs)
2751 info->delta = 0;
2752 }
3cb30911 2753 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
74e87937
SRV
2754 *length -= RB_LEN_TIME_EXTEND;
2755 *delta = 0;
2756}
2757
d90fd774
SRRH
2758/**
2759 * rb_update_event - update event type and data
cfc585a4 2760 * @cpu_buffer: The per cpu buffer of the @event
d90fd774 2761 * @event: the event to update
cfc585a4 2762 * @info: The info to update the @event with (contains length and delta)
d90fd774 2763 *
cfc585a4 2764 * Update the type and data fields of the @event. The length
d90fd774
SRRH
2765 * is the actual size that is written to the ring buffer,
2766 * and with this, we can determine what to place into the
2767 * data field.
2768 */
b7dc42fd 2769static void
d90fd774
SRRH
2770rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2771 struct ring_buffer_event *event,
2772 struct rb_event_info *info)
2773{
2774 unsigned length = info->length;
2775 u64 delta = info->delta;
8672e494
SRV
2776 unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2777
a948c69d 2778 if (!WARN_ON_ONCE(nest >= MAX_NEST))
8672e494 2779 cpu_buffer->event_stamp[nest] = info->ts;
a4543a2f
SRRH
2780
2781 /*
d90fd774 2782 * If we need to add a timestamp, then we
6167c205 2783 * add it to the start of the reserved space.
a4543a2f 2784 */
74e87937
SRV
2785 if (unlikely(info->add_timestamp))
2786 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
a4543a2f 2787
d90fd774
SRRH
2788 event->time_delta = delta;
2789 length -= RB_EVNT_HDR_SIZE;
adab66b7 2790 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
d90fd774
SRRH
2791 event->type_len = 0;
2792 event->array[0] = length;
2793 } else
2794 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2795}
2796
2797static unsigned rb_calculate_event_length(unsigned length)
2798{
2799 struct ring_buffer_event event; /* Used only for sizeof array */
2800
2801 /* zero length can cause confusions */
2802 if (!length)
2803 length++;
2804
adab66b7 2805 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
d90fd774
SRRH
2806 length += sizeof(event.array[0]);
2807
2808 length += RB_EVNT_HDR_SIZE;
adab66b7 2809 length = ALIGN(length, RB_ARCH_ALIGNMENT);
d90fd774
SRRH
2810
2811 /*
2812 * In case the time delta is larger than the 27 bits for it
2813 * in the header, we need to add a timestamp. If another
2814 * event comes in when trying to discard this one to increase
2815 * the length, then the timestamp will be added in the allocated
2816 * space of this event. If length is bigger than the size needed
2817 * for the TIME_EXTEND, then padding has to be used. The events
2818 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2819 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2820 * As length is a multiple of 4, we only need to worry if it
2821 * is 12 (RB_LEN_TIME_EXTEND + 4).
2822 */
2823 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2824 length += RB_ALIGNMENT;
2825
2826 return length;
2827}
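/*
 * Worked example (with RB_EVNT_HDR_SIZE == 4 and 4-byte alignment): a
 * 5-byte payload becomes 5 + 4 = 9 bytes, aligned up to 12. Since 12 is
 * exactly RB_LEN_TIME_EXTEND + RB_ALIGNMENT, it is bumped to 16 so that
 * a later discard can always be replaced by a TIME_EXTEND plus valid
 * padding.
 */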
2828
bc92b956 2829static inline bool
d90fd774
SRRH
2830rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2831 struct ring_buffer_event *event)
2832{
2833 unsigned long new_index, old_index;
2834 struct buffer_page *bpage;
d90fd774
SRRH
2835 unsigned long addr;
2836
3cb30911 2837 new_index = rb_event_index(cpu_buffer, event);
d90fd774
SRRH
2838 old_index = new_index + rb_event_ts_length(event);
2839 addr = (unsigned long)event;
3cb30911 2840 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
d90fd774 2841
8573636e 2842 bpage = READ_ONCE(cpu_buffer->tail_page);
d90fd774 2843
083e9f65
SRG
2844 /*
2845 * Make sure the tail_page is still the same and
2846 * the next write location is the end of this event
2847 */
d90fd774
SRRH
2848 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2849 unsigned long write_mask =
2850 local_read(&bpage->write) & ~RB_WRITE_MASK;
2851 unsigned long event_length = rb_event_length(event);
a389d86f 2852
b2dd7975
SRG
2853 /*
2854 * Make the before_stamp different from the write_stamp
2855 * so that the next event adds an absolute
2856 * value and does not rely on the saved write stamp, which
2857 * is now going to be bogus.
083e9f65
SRG
2858 *
2859 * By setting the before_stamp to zero, the next event
2860 * is not going to use the write_stamp and will instead
2861 * create an absolute timestamp. This means there's no
2862 * reason to update the write_stamp!
b2dd7975
SRG
2863 */
2864 rb_time_set(&cpu_buffer->before_stamp, 0);
2865
a389d86f
SRV
2866 /*
2867 * If an event were to come in now, it would see that the
2868 * write_stamp and the before_stamp are different, and assume
2869 * that this event just added itself before updating
2870 * the write stamp. The interrupting event will fix the
083e9f65 2871 * write stamp for us, and use an absolute timestamp.
a389d86f
SRV
2872 */
2873
d90fd774
SRRH
2874 /*
2875 * This is on the tail page. It is possible that
2876 * a write could come in and move the tail page
2877 * and write to the next page. That is fine
2878 * because we just shorten what is on this page.
2879 */
2880 old_index += write_mask;
2881 new_index += write_mask;
00a8478f
UB
2882
2883 /* caution: old_index gets updated on cmpxchg failure */
2884 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
d90fd774
SRRH
2885 /* update counters */
2886 local_sub(event_length, &cpu_buffer->entries_bytes);
bc92b956 2887 return true;
d90fd774
SRRH
2888 }
2889 }
2890
2891 /* could not discard */
bc92b956 2892 return false;
d90fd774
SRRH
2893}
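/*
 * Caller-side sketch (illustrative; fill_event() is a hypothetical
 * helper): a reserved event that turns out to be unneeded can be
 * discarded. If it is still the last event on the tail page, its space
 * is reclaimed by the cmpxchg above; otherwise it is turned into
 * padding:
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (event && !fill_event(event))	// hypothetical helper
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (event)
 *		ring_buffer_unlock_commit(buffer);
 */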
2894
2895static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2896{
2897 local_inc(&cpu_buffer->committing);
2898 local_inc(&cpu_buffer->commits);
2899}
2900
38e11df1 2901static __always_inline void
d90fd774
SRRH
2902rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2903{
2904 unsigned long max_count;
2905
2906 /*
2907 * We only race with interrupts and NMIs on this CPU.
2908 * If we own the commit event, then we can commit
2909 * all others that interrupted us, since the interruptions
2910 * are in stack format (they finish before they come
2911 * back to us). This allows us to do a simple loop to
2912 * assign the commit to the tail.
2913 */
2914 again:
2915 max_count = cpu_buffer->nr_pages * 100;
2916
8573636e 2917 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
d90fd774
SRRH
2918 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2919 return;
2920 if (RB_WARN_ON(cpu_buffer,
2921 rb_is_reader_page(cpu_buffer->tail_page)))
2922 return;
6455b616
ZY
2923 /*
2924 * No need for a memory barrier here, as the update
2925 * of the tail_page did it for this page.
2926 */
d90fd774
SRRH
2927 local_set(&cpu_buffer->commit_page->page->commit,
2928 rb_page_write(cpu_buffer->commit_page));
6689bed3 2929 rb_inc_page(&cpu_buffer->commit_page);
d90fd774
SRRH
2930 /* add barrier to keep gcc from optimizing too much */
2931 barrier();
2932 }
2933 while (rb_commit_index(cpu_buffer) !=
2934 rb_page_write(cpu_buffer->commit_page)) {
2935
6455b616
ZY
2936 /* Make sure the readers see the content of what is committed. */
2937 smp_wmb();
d90fd774
SRRH
2938 local_set(&cpu_buffer->commit_page->page->commit,
2939 rb_page_write(cpu_buffer->commit_page));
2940 RB_WARN_ON(cpu_buffer,
2941 local_read(&cpu_buffer->commit_page->page->commit) &
2942 ~RB_WRITE_MASK);
2943 barrier();
2944 }
2945
2946 /* again, keep gcc from optimizing */
2947 barrier();
2948
2949 /*
2950 * If an interrupt came in just after the first while loop
2951 * and pushed the tail page forward, we will be left with
2952 * a dangling commit that will never go forward.
2953 */
8573636e 2954 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
d90fd774
SRRH
2955 goto again;
2956}
2957
38e11df1 2958static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
d90fd774
SRRH
2959{
2960 unsigned long commits;
2961
2962 if (RB_WARN_ON(cpu_buffer,
2963 !local_read(&cpu_buffer->committing)))
2964 return;
2965
2966 again:
2967 commits = local_read(&cpu_buffer->commits);
2968 /* synchronize with interrupts */
2969 barrier();
2970 if (local_read(&cpu_buffer->committing) == 1)
2971 rb_set_commit_to_write(cpu_buffer);
2972
2973 local_dec(&cpu_buffer->committing);
2974
2975 /* synchronize with interrupts */
2976 barrier();
2977
2978 /*
2979 * Need to account for interrupts coming in between the
2980 * updating of the commit page and the clearing of the
2981 * committing counter.
2982 */
2983 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2984 !local_read(&cpu_buffer->committing)) {
2985 local_inc(&cpu_buffer->committing);
2986 goto again;
2987 }
2988}
2989
2990static inline void rb_event_discard(struct ring_buffer_event *event)
2991{
dc4e2801 2992 if (extended_time(event))
d90fd774
SRRH
2993 event = skip_time_extend(event);
2994
2995 /* array[0] holds the actual length for the discarded event */
2996 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2997 event->type_len = RINGBUF_TYPE_PADDING;
2998 /* time delta must be non zero */
2999 if (!event->time_delta)
3000 event->time_delta = 1;
3001}
3002
04aabc32 3003static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
d90fd774
SRRH
3004{
3005 local_inc(&cpu_buffer->entries);
d90fd774
SRRH
3006 rb_end_commit(cpu_buffer);
3007}
3008
3009static __always_inline void
13292494 3010rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
d90fd774 3011{
d90fd774
SRRH
3012 if (buffer->irq_work.waiters_pending) {
3013 buffer->irq_work.waiters_pending = false;
3014 /* irq_work_queue() supplies its own memory barriers */
3015 irq_work_queue(&buffer->irq_work.work);
3016 }
3017
3018 if (cpu_buffer->irq_work.waiters_pending) {
3019 cpu_buffer->irq_work.waiters_pending = false;
3020 /* irq_work_queue() supplies its own memory barriers */
3021 irq_work_queue(&cpu_buffer->irq_work.work);
3022 }
3023
03329f99
SRV
3024 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3025 return;
d90fd774 3026
03329f99
SRV
3027 if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3028 return;
2c2b0a78 3029
03329f99
SRV
3030 if (!cpu_buffer->irq_work.full_waiters_pending)
3031 return;
2c2b0a78 3032
03329f99
SRV
3033 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3034
42fb0a1e 3035 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
03329f99
SRV
3036 return;
3037
3038 cpu_buffer->irq_work.wakeup_full = true;
3039 cpu_buffer->irq_work.full_waiters_pending = false;
3041 /* irq_work_queue() supplies its own memory barriers */
3041 irq_work_queue(&cpu_buffer->irq_work.work);
d90fd774
SRRH
3042}
3043
28575c61
SRV
3044#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3045# define do_ring_buffer_record_recursion() \
3046 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3047#else
3048# define do_ring_buffer_record_recursion() do { } while (0)
3049#endif
3050
d90fd774
SRRH
3051/*
3052 * The lock and unlock are done within a preempt disable section.
3053 * The current_context per_cpu variable can only be modified
3054 * by the current task between lock and unlock. But it can
a0e3a18f
SRV
3055 * be modified more than once via an interrupt. To pass this
3056 * information from the lock to the unlock without having to
3057 * access the 'in_interrupt()' functions again (which do show
3058 * a bit of overhead in something as critical as function tracing),
3059 * we use a bitmask trick.
d90fd774 3060 *
b02414c8
SRV
3061 * bit 1 = NMI context
3062 * bit 2 = IRQ context
3063 * bit 3 = SoftIRQ context
3064 * bit 4 = normal context.
d90fd774 3065 *
a0e3a18f
SRV
3066 * This works because this is the order of contexts that can
3067 * preempt other contexts. A SoftIRQ never preempts an IRQ
3068 * context.
3069 *
3070 * When the context is determined, the corresponding bit is
3071 * checked and set (if it was set, then a recursion of that context
3072 * happened).
3073 *
3074 * On unlock, we need to clear this bit. To do so, just subtract
3075 * 1 from the current_context and AND it to itself.
3076 *
3077 * (binary)
3078 * 101 - 1 = 100
3079 * 101 & 100 = 100 (clearing bit zero)
3080 *
3081 * 1010 - 1 = 1001
3082 * 1010 & 1001 = 1000 (clearing bit 1)
3083 *
3084 * The least significant bit can be cleared this way, and it
3085 * just so happens that it is the same bit corresponding to
3086 * the current context.
b02414c8
SRV
3087 *
3088 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3089 * is set when a recursion is detected at the current context, and if
3090 * the TRANSITION bit is already set, it will fail the recursion.
3091 * This is needed because there's a lag between the changing of
3092 * interrupt context and updating the preempt count. In this case,
3093 * a false positive will be found. To handle this, one extra recursion
3094 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3095 * bit is already set, then it is considered a recursion and the function
3096 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3097 *
3098 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3099 * to be cleared. Even if it wasn't the context that set it. That is,
3100 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3101 * is called before preempt_count() is updated, since the check will
3102 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3103 * NMI then comes in, it will set the NMI bit, but when the NMI code
f2cc020d 3104 * does the trace_recursive_unlock() it will clear the TRANSITION bit
b02414c8
SRV
3105 * and leave the NMI bit set. But this is fine, because the interrupt
3106 * code that set the TRANSITION bit will then clear the NMI bit when it
3107 * calls trace_recursive_unlock(). If another NMI comes in, it will
3108 * set the TRANSITION bit and continue.
3109 *
3110 * Note: The TRANSITION bit only handles a single transition between context.
d90fd774
SRRH
3111 */
3112
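/*
 * Illustrative walk-through of the unlock trick (assumed values, with
 * nest == 0): normal context sets bit RB_CTX_NORMAL, then an IRQ nests
 * and sets bit RB_CTX_IRQ:
 *
 *	val = BIT(RB_CTX_NORMAL) | BIT(RB_CTX_IRQ);	// 0b10100
 *	val &= val - 1;					// 0b10000
 *
 * Clearing the lowest set bit always drops the innermost context,
 * because more deeply nested contexts use lower bit positions.
 */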
bc92b956 3113static __always_inline bool
d90fd774
SRRH
3114trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3115{
a0e3a18f 3116 unsigned int val = cpu_buffer->current_context;
91ebe8bc 3117 int bit = interrupt_context_level();
9b84fadc
SRV
3118
3119 bit = RB_CTX_NORMAL - bit;
a0e3a18f 3120
b02414c8
SRV
3121 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3122 /*
3123 * It is possible that this was called by transitioning
3124 * between interrupt context, and preempt_count() has not
3125 * been updated yet. In this case, use the TRANSITION bit.
3126 */
3127 bit = RB_CTX_TRANSITION;
28575c61
SRV
3128 if (val & (1 << (bit + cpu_buffer->nest))) {
3129 do_ring_buffer_record_recursion();
bc92b956 3130 return true;
28575c61 3131 }
b02414c8 3132 }
d90fd774 3133
8e012066 3134 val |= (1 << (bit + cpu_buffer->nest));
a0e3a18f 3135 cpu_buffer->current_context = val;
d90fd774 3136
bc92b956 3137 return false;
d90fd774
SRRH
3138}
3139
3140static __always_inline void
3141trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3142{
8e012066
SRV
3143 cpu_buffer->current_context &=
3144 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3145}
3146
b02414c8
SRV
3147/* The recursive locking above uses 5 bits */
3148#define NESTED_BITS 5
8e012066
SRV
3149
3150/**
3151 * ring_buffer_nest_start - Allow to trace while nested
3152 * @buffer: The ring buffer to modify
3153 *
6167c205 3154 * The ring buffer has a safety mechanism to prevent recursion.
8e012066
SRV
3155 * But there may be a case where a trace needs to be done while
3156 * tracing something else. In this case, calling this function
3157 * will allow this function to nest within a currently active
3158 * ring_buffer_lock_reserve().
3159 *
3160 * Call this function before calling another ring_buffer_lock_reserve() and
3161 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3162 */
13292494 3163void ring_buffer_nest_start(struct trace_buffer *buffer)
8e012066
SRV
3164{
3165 struct ring_buffer_per_cpu *cpu_buffer;
3166 int cpu;
3167
3168 /* Enabled by ring_buffer_nest_end() */
3169 preempt_disable_notrace();
3170 cpu = raw_smp_processor_id();
3171 cpu_buffer = buffer->buffers[cpu];
6167c205 3172 /* This is the shift value for the above recursive locking */
8e012066
SRV
3173 cpu_buffer->nest += NESTED_BITS;
3174}
3175
3176/**
3177 * ring_buffer_nest_end - Allow to trace while nested
3178 * @buffer: The ring buffer to modify
3179 *
3180 * Must be called after ring_buffer_nest_start() and after the
3181 * ring_buffer_unlock_commit().
3182 */
13292494 3183void ring_buffer_nest_end(struct trace_buffer *buffer)
8e012066
SRV
3184{
3185 struct ring_buffer_per_cpu *cpu_buffer;
3186 int cpu;
3187
3188 /* disabled by ring_buffer_nest_start() */
3189 cpu = raw_smp_processor_id();
3190 cpu_buffer = buffer->buffers[cpu];
6167c205 3191 /* This is the shift value for the above recursive locking */
8e012066
SRV
3192 cpu_buffer->nest -= NESTED_BITS;
3193 preempt_enable_notrace();
d90fd774
SRRH
3194}
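/*
 * Illustrative call sequence (a sketch, not taken from this file): a
 * nested reserve is wrapped by the start/end pair while an outer
 * reserve is still open:
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	ring_buffer_nest_start(buffer);
 *	nested = ring_buffer_lock_reserve(buffer, nested_len);
 *	// ... fill in and commit the nested event ...
 *	ring_buffer_unlock_commit(buffer);
 *	ring_buffer_nest_end(buffer);
 *	// ... fill in the outer event ...
 *	ring_buffer_unlock_commit(buffer);
 */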
3195
3196/**
3197 * ring_buffer_unlock_commit - commit a reserved event
3198 * @buffer: The buffer to commit to
d90fd774
SRRH
3199 *
3200 * This commits the data to the ring buffer, and releases any locks held.
3201 *
3202 * Must be paired with ring_buffer_lock_reserve.
3203 */
04aabc32 3204int ring_buffer_unlock_commit(struct trace_buffer *buffer)
d90fd774
SRRH
3205{
3206 struct ring_buffer_per_cpu *cpu_buffer;
3207 int cpu = raw_smp_processor_id();
3208
3209 cpu_buffer = buffer->buffers[cpu];
3210
04aabc32 3211 rb_commit(cpu_buffer);
d90fd774
SRRH
3212
3213 rb_wakeups(buffer, cpu_buffer);
3214
3215 trace_recursive_unlock(cpu_buffer);
3216
3217 preempt_enable_notrace();
3218
3219 return 0;
3220}
3221EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3222
5b7be9c7
SRV
3223/* Special value to validate all deltas on a page. */
3224#define CHECK_FULL_PAGE 1L
3225
3226#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
d40dbb61
SRG
3227
3228static const char *show_irq_str(int bits)
3229{
3230 const char *type[] = {
3231 ".", // 0
3232 "s", // 1
3233 "h", // 2
3234 "Hs", // 3
3235 "n", // 4
3236 "Ns", // 5
3237 "Nh", // 6
3238 "NHs", // 7
3239 };
3240
3241 return type[bits];
3242}
3243
3244/* Assume this is a trace event */
3245static const char *show_flags(struct ring_buffer_event *event)
3246{
3247 struct trace_entry *entry;
3248 int bits = 0;
3249
3250 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3251 return "X";
3252
3253 entry = ring_buffer_event_data(event);
3254
3255 if (entry->flags & TRACE_FLAG_SOFTIRQ)
3256 bits |= 1;
3257
3258 if (entry->flags & TRACE_FLAG_HARDIRQ)
3259 bits |= 2;
3260
3261 if (entry->flags & TRACE_FLAG_NMI)
3262 bits |= 4;
3263
3264 return show_irq_str(bits);
3265}
3266
3267static const char *show_irq(struct ring_buffer_event *event)
3268{
3269 struct trace_entry *entry;
3270
3271 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3272 return "";
3273
3274 entry = ring_buffer_event_data(event);
3275 if (entry->flags & TRACE_FLAG_IRQS_OFF)
3276 return "d";
3277 return "";
3278}
3279
3280static const char *show_interrupt_level(void)
3281{
3282 unsigned long pc = preempt_count();
3283 unsigned char level = 0;
3284
3285 if (pc & SOFTIRQ_OFFSET)
3286 level |= 1;
3287
3288 if (pc & HARDIRQ_MASK)
3289 level |= 2;
3290
3291 if (pc & NMI_MASK)
3292 level |= 4;
3293
3294 return show_irq_str(level);
3295}
3296
5b7be9c7
SRV
3297static void dump_buffer_page(struct buffer_data_page *bpage,
3298 struct rb_event_info *info,
3299 unsigned long tail)
3300{
3301 struct ring_buffer_event *event;
3302 u64 ts, delta;
3303 int e;
3304
3305 ts = bpage->time_stamp;
3306 pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
3307
3308 for (e = 0; e < tail; e += rb_event_length(event)) {
3309
3310 event = (struct ring_buffer_event *)(bpage->data + e);
3311
3312 switch (event->type_len) {
3313
3314 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 3315 delta = rb_event_time_stamp(event);
5b7be9c7 3316 ts += delta;
0b9036ef
SRG
3317 pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
3318 e, ts, delta);
5b7be9c7
SRV
3319 break;
3320
3321 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 3322 delta = rb_event_time_stamp(event);
6695da58 3323 ts = rb_fix_abs_ts(delta, ts);
0b9036ef
SRG
3324 pr_warn(" 0x%x: [%lld] absolute:%lld TIME STAMP\n",
3325 e, ts, delta);
5b7be9c7
SRV
3326 break;
3327
3328 case RINGBUF_TYPE_PADDING:
3329 ts += event->time_delta;
0b9036ef
SRG
3330 pr_warn(" 0x%x: [%lld] delta:%d PADDING\n",
3331 e, ts, event->time_delta);
5b7be9c7
SRV
3332 break;
3333
3334 case RINGBUF_TYPE_DATA:
3335 ts += event->time_delta;
d40dbb61
SRG
3336 pr_warn(" 0x%x: [%lld] delta:%d %s%s\n",
3337 e, ts, event->time_delta,
3338 show_flags(event), show_irq(event));
5b7be9c7
SRV
3339 break;
3340
3341 default:
3342 break;
3343 }
3344 }
0b9036ef 3345 pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
5b7be9c7
SRV
3346}
3347
3348static DEFINE_PER_CPU(atomic_t, checking);
3349static atomic_t ts_dump;
3350
f50345b4
SRG
3351#define buffer_warn_return(fmt, ...) \
3352 do { \
3353 /* If another report is happening, ignore this one */ \
3354 if (atomic_inc_return(&ts_dump) != 1) { \
3355 atomic_dec(&ts_dump); \
3356 goto out; \
3357 } \
3358 atomic_inc(&cpu_buffer->record_disabled); \
3359 pr_warn(fmt, ##__VA_ARGS__); \
3360 dump_buffer_page(bpage, info, tail); \
3361 atomic_dec(&ts_dump); \
3362 /* There are some cases in boot up where this can happen */ \
3363 if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING)) \
3364 /* Do not re-enable checking */ \
3365 return; \
3366 } while (0)
3367
5b7be9c7
SRV
3368/*
3369 * Check if the current event time stamp matches the deltas on
3370 * the buffer page.
3371 */
3372static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3373 struct rb_event_info *info,
3374 unsigned long tail)
3375{
3376 struct ring_buffer_event *event;
3377 struct buffer_data_page *bpage;
3378 u64 ts, delta;
3379 bool full = false;
3380 int e;
3381
3382 bpage = info->tail_page->page;
3383
3384 if (tail == CHECK_FULL_PAGE) {
3385 full = true;
3386 tail = local_read(&bpage->commit);
3387 } else if (info->add_timestamp &
3388 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3389 /* Ignore events with absolute time stamps */
3390 return;
3391 }
3392
3393 /*
3394 * Do not check the first event (skip possible extends too).
3395 * Also do not check if previous events have not been committed.
3396 */
3397 if (tail <= 8 || tail > local_read(&bpage->commit))
3398 return;
3399
3400 /*
083e9f65 3401 * If this write interrupted another event being checked, skip this check.
5b7be9c7
SRV
3402 */
3403 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3404 goto out;
3405
3406 ts = bpage->time_stamp;
3407
3408 for (e = 0; e < tail; e += rb_event_length(event)) {
3409
3410 event = (struct ring_buffer_event *)(bpage->data + e);
3411
3412 switch (event->type_len) {
3413
3414 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 3415 delta = rb_event_time_stamp(event);
5b7be9c7
SRV
3416 ts += delta;
3417 break;
3418
3419 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 3420 delta = rb_event_time_stamp(event);
f50345b4
SRG
3421 delta = rb_fix_abs_ts(delta, ts);
3422 if (delta < ts) {
3423 buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
3424 cpu_buffer->cpu, ts, delta);
3425 }
3426 ts = delta;
5b7be9c7
SRV
3427 break;
3428
3429 case RINGBUF_TYPE_PADDING:
3430 if (event->time_delta == 1)
3431 break;
957cdcd9 3432 fallthrough;
5b7be9c7
SRV
3433 case RINGBUF_TYPE_DATA:
3434 ts += event->time_delta;
3435 break;
3436
3437 default:
3438 RB_WARN_ON(cpu_buffer, 1);
3439 }
3440 }
3441 if ((full && ts > info->ts) ||
3442 (!full && ts + info->delta != info->ts)) {
f50345b4
SRG
3443 buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
3444 cpu_buffer->cpu,
3445 ts + info->delta, info->ts, info->delta,
3446 info->before, info->after,
3447 full ? " (full)" : "", show_interrupt_level());
5b7be9c7
SRV
3448 }
3449out:
3450 atomic_dec(this_cpu_ptr(&checking));
3451}
3452#else
3453static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3454 struct rb_event_info *info,
3455 unsigned long tail)
3456{
3457}
3458#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3459
6634ff26
SR
3460static struct ring_buffer_event *
3461__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 3462 struct rb_event_info *info)
6634ff26 3463{
6634ff26 3464 struct ring_buffer_event *event;
fcc742ea 3465 struct buffer_page *tail_page;
a389d86f 3466 unsigned long tail, write, w;
69d1b839 3467
8573636e
SRRH
3468 /* Don't let the compiler play games with cpu_buffer->tail_page */
3469 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
a389d86f
SRV
3470
3471 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
3472 barrier();
c84897c0
SRG
3473 rb_time_read(&cpu_buffer->before_stamp, &info->before);
3474 rb_time_read(&cpu_buffer->write_stamp, &info->after);
a389d86f
SRV
3475 barrier();
3476 info->ts = rb_time_stamp(cpu_buffer->buffer);
3477
58fbc3c6 3478 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
a389d86f 3479 info->delta = info->ts;
a389d86f 3480 } else {
58fbc3c6
SRV
3481 /*
3482 * If interrupting an event time update, we may need an
3483 * absolute timestamp.
3484 * Don't bother if this is the start of a new page (w == 0).
3485 */
b3ae7b67
SRG
3486 if (!w) {
3487 /* Use the sub-buffer timestamp */
3488 info->delta = 0;
c84897c0 3489 } else if (unlikely(info->before != info->after)) {
58fbc3c6
SRV
3490 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3491 info->length += RB_LEN_TIME_EXTEND;
3492 } else {
3493 info->delta = info->ts - info->after;
3494 if (unlikely(test_time_stamp(info->delta))) {
3495 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3496 info->length += RB_LEN_TIME_EXTEND;
3497 }
10464b4a 3498 }
7c4b4a51 3499 }
b7dc42fd 3500
10464b4a 3501 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
a389d86f
SRV
3502
3503 /*C*/ write = local_add_return(info->length, &tail_page->write);
77ae365e
SR
3504
3505 /* set write to only the index of the write */
3506 write &= RB_WRITE_MASK;
a389d86f 3507
fcc742ea 3508 tail = write - info->length;
6634ff26 3509
a389d86f 3510 /* See if we shot past the end of this buffer page */
139f8400 3511 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
9e45e39d 3512 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
a389d86f
SRV
3513 return rb_move_tail(cpu_buffer, tail, info);
3514 }
3515
3516 if (likely(tail == w)) {
a389d86f 3517 /* Nothing interrupted us between A and C */
10464b4a 3518 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
dd939425
SRG
3519 /*
3520 * If something came in between C and D, the write stamp
3521 * may now not be in sync. But that's fine as the before_stamp
3522 * will be different and then next event will just be forced
3523 * to use an absolute timestamp.
3524 */
7c4b4a51
SRV
3525 if (likely(!(info->add_timestamp &
3526 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
a389d86f 3527 /* This did not interrupt any time update */
58fbc3c6 3528 info->delta = info->ts - info->after;
a389d86f 3529 else
82db909e 3530 /* Just use full timestamp for interrupting event */
a389d86f 3531 info->delta = info->ts;
5b7be9c7 3532 check_buffer(cpu_buffer, info, tail);
a389d86f
SRV
3533 } else {
3534 u64 ts;
3535 /* SLOW PATH - Interrupted between A and C */
b803d7c6
SRG
3536
3537 /* Save the old before_stamp */
c84897c0 3538 rb_time_read(&cpu_buffer->before_stamp, &info->before);
b803d7c6
SRG
3539
3540 /*
3541 * Read a new timestamp and update the before_stamp to make
3542 * the next event after this one force using an absolute
3543 * timestamp. This is in case an interrupt were to come in
3544 * between E and F.
3545 */
a389d86f 3546 ts = rb_time_stamp(cpu_buffer->buffer);
b803d7c6
SRG
3547 rb_time_set(&cpu_buffer->before_stamp, ts);
3548
3549 barrier();
c84897c0 3550 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after);
a389d86f 3551 barrier();
b803d7c6
SRG
3552 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3553 info->after == info->before && info->after < ts) {
3554 /*
3555 * Nothing came after this event between C and F, it is
3556 * safe to use info->after for the delta as it
3557 * matched info->before and is still valid.
3558 */
58fbc3c6 3559 info->delta = ts - info->after;
a389d86f
SRV
3560 } else {
3561 /*
b803d7c6 3562 * Interrupted between C and F:
a389d86f
SRV
3563 * Lost the previous event's time stamp. Just set the
3564 * delta to zero, and this will be the same time as
3565 * the event this event interrupted. And the events that
3566 * came after this will still be correct (as they would
3567 * have built their delta on the previous event).
3568 */
3569 info->delta = 0;
3570 }
8672e494 3571 info->ts = ts;
7c4b4a51 3572 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
a389d86f
SRV
3573 }
3574
6634ff26 3575 /*
a4543a2f 3576 * If this is the first commit on the page, then it has the same
b7dc42fd 3577 * timestamp as the page itself.
6634ff26 3578 */
7c4b4a51
SRV
3579 if (unlikely(!tail && !(info->add_timestamp &
3580 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
a4543a2f
SRRH
3581 info->delta = 0;
3582
b7dc42fd
SRRH
3583 /* We reserved something on the buffer */
3584
3585 event = __rb_page_index(tail_page, tail);
a4543a2f
SRRH
3586 rb_update_event(cpu_buffer, event, info);
3587
3588 local_inc(&tail_page->entries);
6634ff26 3589
b7dc42fd
SRRH
3590 /*
3591 * If this is the first commit on the page, then update
3592 * its timestamp.
3593 */
75b21c6d 3594 if (unlikely(!tail))
b7dc42fd
SRRH
3595 tail_page->page->time_stamp = info->ts;
3596
c64e148a 3597 /* account for these added bytes */
fcc742ea 3598 local_add(info->length, &cpu_buffer->entries_bytes);
c64e148a 3599
6634ff26
SR
3600 return event;
3601}
3602
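/*
 * Editor's sketch of the interrupted-reservation case handled above
 * (illustrative, not part of the original file):
 *
 *   task:  A (read w and stamps) ......... C (tail != w: slow path) E F
 *   irq:          A   C   D (nested event fully reserved and committed)
 *
 * The interrupt moved the write index between this event's A and C,
 * so write_stamp can no longer be trusted. If F observes that nothing
 * came in after C and the stamps still agree, the delta is derived
 * from a fresh timestamp; otherwise the delta falls back to zero.
 */
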
fa7ffb39 3603static __always_inline struct ring_buffer_event *
13292494 3604rb_reserve_next_event(struct trace_buffer *buffer,
62f0b3eb 3605 struct ring_buffer_per_cpu *cpu_buffer,
1cd8d735 3606 unsigned long length)
7a8e76a3
SR
3607{
3608 struct ring_buffer_event *event;
fcc742ea 3609 struct rb_event_info info;
818e3dd3 3610 int nr_loops = 0;
58fbc3c6 3611 int add_ts_default;
7a8e76a3 3612
71229230
SRG
3613 /* ring buffer does cmpxchg, make sure it is safe in NMI context */
3614 if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3615 (unlikely(in_nmi()))) {
3616 return NULL;
3617 }
3618
fa743953 3619 rb_start_commit(cpu_buffer);
a389d86f 3620 /* The commit page can not change after this */
fa743953 3621
85bac32c 3622#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
62f0b3eb
SR
3623 /*
3624 * Due to the ability to swap a cpu buffer from a buffer
3625 * it is possible it was swapped before we committed.
3626 * (committing stops a swap). We check for it here and
3627 * if it happened, we have to fail the write.
3628 */
3629 barrier();
6aa7de05 3630 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
62f0b3eb
SR
3631 local_dec(&cpu_buffer->committing);
3632 local_dec(&cpu_buffer->commits);
3633 return NULL;
3634 }
85bac32c 3635#endif
b7dc42fd 3636
fcc742ea 3637 info.length = rb_calculate_event_length(length);
58fbc3c6
SRV
3638
3639 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3640 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3641 info.length += RB_LEN_TIME_EXTEND;
139f8400 3642 if (info.length > cpu_buffer->buffer->max_data_size)
b3ae7b67 3643 goto out_fail;
58fbc3c6
SRV
3644 } else {
3645 add_ts_default = RB_ADD_STAMP_NONE;
3646 }
3647
a4543a2f 3648 again:
58fbc3c6 3649 info.add_timestamp = add_ts_default;
b7dc42fd
SRRH
3650 info.delta = 0;
3651
818e3dd3
SR
3652 /*
3653 * We allow for interrupts to reenter here and do a trace.
3654 * If one does, it will cause this original code to loop
3655 * back here. Even with heavy interrupts happening, this
3656 * should only happen a few times in a row. If this happens
3657 * 1000 times in a row, there must be either an interrupt
3658 * storm or something buggy.
3659 * Bail!
3660 */
3e89c7bb 3661 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
fa743953 3662 goto out_fail;
818e3dd3 3663
fcc742ea
SRRH
3664 event = __rb_reserve_next(cpu_buffer, &info);
3665
bd1b7cd3 3666 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
58fbc3c6 3667 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
bd1b7cd3 3668 info.length -= RB_LEN_TIME_EXTEND;
bf41a158 3669 goto again;
bd1b7cd3 3670 }
bf41a158 3671
a389d86f
SRV
3672 if (likely(event))
3673 return event;
fa743953
SR
3674 out_fail:
3675 rb_end_commit(cpu_buffer);
3676 return NULL;
7a8e76a3
SR
3677}
3678
3679/**
3680 * ring_buffer_lock_reserve - reserve a part of the buffer
3681 * @buffer: the ring buffer to reserve from
3682 * @length: the length of the data to reserve (excluding event header)
7a8e76a3 3683 *
6167c205 3684 * Returns a reserved event on the ring buffer to copy directly to.
7a8e76a3
SR
3685 * The user of this interface will need to get the body to write into
3686 * and can use the ring_buffer_event_data() interface.
3687 *
3688 * The length is the length of the data needed, not the event length
3689 * which also includes the event header.
3690 *
3691 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3692 * If NULL is returned, then nothing has been allocated or locked.
3693 */
3694struct ring_buffer_event *
13292494 3695ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
7a8e76a3
SR
3696{
3697 struct ring_buffer_per_cpu *cpu_buffer;
3698 struct ring_buffer_event *event;
5168ae50 3699 int cpu;
7a8e76a3 3700
bf41a158 3701 /* If we are tracing schedule, we don't want to recurse */
5168ae50 3702 preempt_disable_notrace();
bf41a158 3703
3205f806 3704 if (unlikely(atomic_read(&buffer->record_disabled)))
58a09ec6 3705 goto out;
261842b7 3706
7a8e76a3
SR
3707 cpu = raw_smp_processor_id();
3708
3205f806 3709 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
d769041f 3710 goto out;
7a8e76a3
SR
3711
3712 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 3713
3205f806 3714 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
d769041f 3715 goto out;
7a8e76a3 3716
139f8400 3717 if (unlikely(length > buffer->max_data_size))
bf41a158 3718 goto out;
7a8e76a3 3719
58a09ec6
SRRH
3720 if (unlikely(trace_recursive_lock(cpu_buffer)))
3721 goto out;
3722
62f0b3eb 3723 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 3724 if (!event)
58a09ec6 3725 goto out_unlock;
7a8e76a3
SR
3726
3727 return event;
3728
58a09ec6
SRRH
3729 out_unlock:
3730 trace_recursive_unlock(cpu_buffer);
d769041f 3731 out:
5168ae50 3732 preempt_enable_notrace();
7a8e76a3
SR
3733 return NULL;
3734}
c4f50183 3735EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
7a8e76a3 3736
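/*
 * Editor's usage sketch for the reserve/commit pair documented above
 * (hypothetical example, not part of the original file). It assumes a
 * buffer created with ring_buffer_alloc() and that this kernel's
 * ring_buffer_unlock_commit() takes only the buffer argument.
 */
static int example_record_u64(struct trace_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;	/* disabled, too large, or recursion blocked */

	body = ring_buffer_event_data(event);
	*body = val;

	return ring_buffer_unlock_commit(buffer);
}
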
a1863c21
SR
3737/*
3738 * Decrement the entries to the page that an event is on.
3739 * The event does not even need to exist, only the pointer
3740 * to the page it is on. This may only be called before the commit
3741 * takes place.
3742 */
3743static inline void
3744rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3745 struct ring_buffer_event *event)
3746{
3747 unsigned long addr = (unsigned long)event;
3748 struct buffer_page *bpage = cpu_buffer->commit_page;
3749 struct buffer_page *start;
3750
3cb30911 3751 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
a1863c21
SR
3752
3753 /* Do the likely case first */
3754 if (likely(bpage->page == (void *)addr)) {
3755 local_dec(&bpage->entries);
3756 return;
3757 }
3758
3759 /*
3760 * Because the commit page may be on the reader page we
3761 * start with the next page and check the end loop there.
3762 */
6689bed3 3763 rb_inc_page(&bpage);
a1863c21
SR
3764 start = bpage;
3765 do {
3766 if (bpage->page == (void *)addr) {
3767 local_dec(&bpage->entries);
3768 return;
3769 }
6689bed3 3770 rb_inc_page(&bpage);
a1863c21
SR
3771 } while (bpage != start);
3772
3773 /* commit not part of this buffer?? */
3774 RB_WARN_ON(cpu_buffer, 1);
3775}
3776
fa1b47dd 3777/**
88883490 3778 * ring_buffer_discard_commit - discard an event that has not been committed
fa1b47dd
SR
3779 * @buffer: the ring buffer
3780 * @event: non committed event to discard
3781 *
dc892f73
SR
3782 * Sometimes an event that is in the ring buffer needs to be ignored.
3783 * This function lets the user discard an event in the ring buffer
3784 * and then that event will not be read later.
3785 *
6167c205 3786 * This function only works if it is called before the item has been
dc892f73 3787 * committed. It will try to free the event from the ring buffer
fa1b47dd
SR
3788 * if another event has not been added behind it.
3789 *
3790 * If another event has been added behind it, it will set the event
3791 * up as discarded, and perform the commit.
3792 *
3793 * If this function is called, do not call ring_buffer_unlock_commit on
3794 * the event.
3795 */
13292494 3796void ring_buffer_discard_commit(struct trace_buffer *buffer,
fa1b47dd
SR
3797 struct ring_buffer_event *event)
3798{
3799 struct ring_buffer_per_cpu *cpu_buffer;
fa1b47dd
SR
3800 int cpu;
3801
3802 /* The event is discarded regardless */
f3b9aae1 3803 rb_event_discard(event);
fa1b47dd 3804
fa743953
SR
3805 cpu = smp_processor_id();
3806 cpu_buffer = buffer->buffers[cpu];
3807
fa1b47dd
SR
3808 /*
3809 * This must only be called if the event has not been
3810 * committed yet. Thus we can assume that preemption
3811 * is still disabled.
3812 */
fa743953 3813 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
fa1b47dd 3814
a1863c21 3815 rb_decrement_entry(cpu_buffer, event);
0f2541d2 3816 if (rb_try_to_discard(cpu_buffer, event))
edd813bf 3817 goto out;
fa1b47dd 3818
fa1b47dd 3819 out:
fa743953 3820 rb_end_commit(cpu_buffer);
fa1b47dd 3821
58a09ec6 3822 trace_recursive_unlock(cpu_buffer);
f3b9aae1 3823
5168ae50 3824 preempt_enable_notrace();
fa1b47dd
SR
3825
3826}
3827EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
3828
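/*
 * Editor's sketch of the reserve-then-discard pattern described above
 * (hypothetical example, not part of the original file): the event is
 * reserved and filled, then thrown away instead of committed when it
 * turns out not to be wanted.
 */
static int example_record_nonzero(struct trace_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = val;

	if (!val) {
		/* Changed our mind: discard instead of committing */
		ring_buffer_discard_commit(buffer, event);
		return 0;
	}

	return ring_buffer_unlock_commit(buffer);
}
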
7a8e76a3
SR
3829/**
3830 * ring_buffer_write - write data to the buffer without reserving
3831 * @buffer: The ring buffer to write to.
3832 * @length: The length of the data being written (excluding the event header)
3833 * @data: The data to write to the buffer.
3834 *
3835 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3836 * one function. If you already have the data to write to the buffer, it
3837 * may be easier to simply call this function.
3838 *
3839 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3840 * and not the length of the event which would hold the header.
3841 */
13292494 3842int ring_buffer_write(struct trace_buffer *buffer,
01e3e710
DS
3843 unsigned long length,
3844 void *data)
7a8e76a3
SR
3845{
3846 struct ring_buffer_per_cpu *cpu_buffer;
3847 struct ring_buffer_event *event;
7a8e76a3
SR
3848 void *body;
3849 int ret = -EBUSY;
5168ae50 3850 int cpu;
7a8e76a3 3851
5168ae50 3852 preempt_disable_notrace();
bf41a158 3853
52fbe9cd
LJ
3854 if (atomic_read(&buffer->record_disabled))
3855 goto out;
3856
7a8e76a3
SR
3857 cpu = raw_smp_processor_id();
3858
9e01c1b7 3859 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 3860 goto out;
7a8e76a3
SR
3861
3862 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
3863
3864 if (atomic_read(&cpu_buffer->record_disabled))
3865 goto out;
3866
139f8400 3867 if (length > buffer->max_data_size)
be957c44
SR
3868 goto out;
3869
985e871b
SRRH
3870 if (unlikely(trace_recursive_lock(cpu_buffer)))
3871 goto out;
3872
62f0b3eb 3873 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 3874 if (!event)
985e871b 3875 goto out_unlock;
7a8e76a3
SR
3876
3877 body = rb_event_data(event);
3878
3879 memcpy(body, data, length);
3880
04aabc32 3881 rb_commit(cpu_buffer);
7a8e76a3 3882
15693458
SRRH
3883 rb_wakeups(buffer, cpu_buffer);
3884
7a8e76a3 3885 ret = 0;
985e871b
SRRH
3886
3887 out_unlock:
3888 trace_recursive_unlock(cpu_buffer);
3889
7a8e76a3 3890 out:
5168ae50 3891 preempt_enable_notrace();
7a8e76a3
SR
3892
3893 return ret;
3894}
c4f50183 3895EXPORT_SYMBOL_GPL(ring_buffer_write);
7a8e76a3 3896
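/*
 * Editor's sketch (hypothetical, not part of the original file): when
 * the payload already exists, ring_buffer_write() replaces the whole
 * reserve/copy/commit sequence with a single call.
 */
static int example_write_blob(struct trace_buffer *buffer,
			      void *data, unsigned long len)
{
	/* Returns 0 on success, -EBUSY if the event could not be written */
	return ring_buffer_write(buffer, len, data);
}
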
da58834c 3897static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
bf41a158
SR
3898{
3899 struct buffer_page *reader = cpu_buffer->reader_page;
77ae365e 3900 struct buffer_page *head = rb_set_head_page(cpu_buffer);
bf41a158
SR
3901 struct buffer_page *commit = cpu_buffer->commit_page;
3902
77ae365e
SR
3903 /* In case of error, head will be NULL */
3904 if (unlikely(!head))
da58834c 3905 return true;
77ae365e 3906
67f0d6d9
HL
3907 /* Reader should exhaust content in reader page */
3908 if (reader->read != rb_page_commit(reader))
3909 return false;
3910
3911 /*
3912 * If writers are committing on the reader page, knowing all
3913 * committed content has been read, the ring buffer is empty.
3914 */
3915 if (commit == reader)
3916 return true;
3917
3918 /*
3919 * If writers are committing on a page other than reader page
3920 * and head page, there should always be content to read.
3921 */
3922 if (commit != head)
3923 return false;
3924
3925 /*
3926 * Writers are committing on the head page; we just need
3927 * to care about whether there is committed data, and the reader
3928 * will swap the reader page with the head page when it needs to read.
3929 */
3930 return rb_page_commit(commit) == 0;
bf41a158
SR
3931}
3932
7a8e76a3
SR
3933/**
3934 * ring_buffer_record_disable - stop all writes into the buffer
3935 * @buffer: The ring buffer to stop writes to.
3936 *
3937 * This prevents all writes to the buffer. Any attempt to write
3938 * to the buffer after this will fail and return NULL.
3939 *
74401729 3940 * The caller should call synchronize_rcu() after this.
7a8e76a3 3941 */
13292494 3942void ring_buffer_record_disable(struct trace_buffer *buffer)
7a8e76a3
SR
3943{
3944 atomic_inc(&buffer->record_disabled);
3945}
c4f50183 3946EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
7a8e76a3
SR
3947
3948/**
3949 * ring_buffer_record_enable - enable writes to the buffer
3950 * @buffer: The ring buffer to enable writes
3951 *
3952 * Note, multiple disables will need the same number of enables
c41b20e7 3953 * to truly enable the writing (much like preempt_disable).
7a8e76a3 3954 */
13292494 3955void ring_buffer_record_enable(struct trace_buffer *buffer)
7a8e76a3
SR
3956{
3957 atomic_dec(&buffer->record_disabled);
3958}
c4f50183 3959EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
7a8e76a3 3960
499e5470
SR
3961/**
3962 * ring_buffer_record_off - stop all writes into the buffer
3963 * @buffer: The ring buffer to stop writes to.
3964 *
3965 * This prevents all writes to the buffer. Any attempt to write
3966 * to the buffer after this will fail and return NULL.
3967 *
3968 * This is different from ring_buffer_record_disable() as
87abb3b1 3969 * it works like an on/off switch, whereas the disable() version
499e5470
SR
3970 * must be paired with an enable().
3971 */
13292494 3972void ring_buffer_record_off(struct trace_buffer *buffer)
499e5470
SR
3973{
3974 unsigned int rd;
3975 unsigned int new_rd;
3976
8328e36d 3977 rd = atomic_read(&buffer->record_disabled);
499e5470 3978 do {
499e5470 3979 new_rd = rd | RB_BUFFER_OFF;
8328e36d 3980 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
499e5470
SR
3981}
3982EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3983
3984/**
3985 * ring_buffer_record_on - restart writes into the buffer
3986 * @buffer: The ring buffer to start writes to.
3987 *
3988 * This enables all writes to the buffer that was disabled by
3989 * ring_buffer_record_off().
3990 *
3991 * This is different from ring_buffer_record_enable() as
87abb3b1 3992 * it works like an on/off switch, whereas the enable() version
499e5470
SR
3993 * must be paired with a disable().
3994 */
13292494 3995void ring_buffer_record_on(struct trace_buffer *buffer)
499e5470
SR
3996{
3997 unsigned int rd;
3998 unsigned int new_rd;
3999
8328e36d 4000 rd = atomic_read(&buffer->record_disabled);
499e5470 4001 do {
499e5470 4002 new_rd = rd & ~RB_BUFFER_OFF;
8328e36d 4003 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
499e5470
SR
4004}
4005EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4006
4007/**
4008 * ring_buffer_record_is_on - return true if the ring buffer can write
4009 * @buffer: The ring buffer to see if write is enabled
4010 *
4011 * Returns true if the ring buffer is in a state that it accepts writes.
4012 */
13292494 4013bool ring_buffer_record_is_on(struct trace_buffer *buffer)
499e5470
SR
4014{
4015 return !atomic_read(&buffer->record_disabled);
4016}
4017
73c8d894
MH
4018/**
4019 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4020 * @buffer: The ring buffer to see if write is set enabled
4021 *
4022 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4023 * Note that this does NOT mean it is in a writable state.
4024 *
4025 * It may return true when the ring buffer has been disabled by
4026 * ring_buffer_record_disable(), as that is a temporary disabling of
4027 * the ring buffer.
4028 */
13292494 4029bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
73c8d894
MH
4030{
4031 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4032}
4033
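/*
 * Editor's sketch contrasting the two disabling schemes documented
 * above (hypothetical example, not part of the original file):
 * disable()/enable() nest like preempt_disable(), while
 * record_off()/record_on() act as a single on/off switch.
 */
static void example_disable_styles(struct trace_buffer *buffer)
{
	/* Nested style: every disable must be balanced by an enable */
	ring_buffer_record_disable(buffer);
	ring_buffer_record_disable(buffer);
	ring_buffer_record_enable(buffer);	/* still disabled here */
	ring_buffer_record_enable(buffer);	/* writable again */

	/* Switch style: one off, one on, regardless of nesting */
	ring_buffer_record_off(buffer);
	if (!ring_buffer_record_is_on(buffer))
		ring_buffer_record_on(buffer);
}
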
7a8e76a3
SR
4034/**
4035 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4036 * @buffer: The ring buffer to stop writes to.
4037 * @cpu: The CPU buffer to stop
4038 *
4039 * This prevents all writes to the buffer. Any attempt to write
4040 * to the buffer after this will fail and return NULL.
4041 *
74401729 4042 * The caller should call synchronize_rcu() after this.
7a8e76a3 4043 */
13292494 4044void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4045{
4046 struct ring_buffer_per_cpu *cpu_buffer;
4047
9e01c1b7 4048 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4049 return;
7a8e76a3
SR
4050
4051 cpu_buffer = buffer->buffers[cpu];
4052 atomic_inc(&cpu_buffer->record_disabled);
4053}
c4f50183 4054EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
7a8e76a3
SR
4055
4056/**
4057 * ring_buffer_record_enable_cpu - enable writes to the buffer
4058 * @buffer: The ring buffer to enable writes
4059 * @cpu: The CPU to enable.
4060 *
4061 * Note, multiple disables will need the same number of enables
c41b20e7 4062 * to truly enable the writing (much like preempt_disable).
7a8e76a3 4063 */
13292494 4064void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4065{
4066 struct ring_buffer_per_cpu *cpu_buffer;
4067
9e01c1b7 4068 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4069 return;
7a8e76a3
SR
4070
4071 cpu_buffer = buffer->buffers[cpu];
4072 atomic_dec(&cpu_buffer->record_disabled);
4073}
c4f50183 4074EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
7a8e76a3 4075
f6195aa0
SR
4076/*
4077 * The total entries in the ring buffer is the running counter
4078 * of entries entered into the ring buffer, minus the sum of
4079 * the entries read from the ring buffer and the number of
4080 * entries that were overwritten.
4081 */
4082static inline unsigned long
4083rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4084{
4085 return local_read(&cpu_buffer->entries) -
4086 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4087}
4088
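/*
 * Worked example with illustrative numbers (editor's note): if 1000
 * entries were written, 200 were lost to overrun and 300 have already
 * been read, then 1000 - (200 + 300) = 500 entries remain unconsumed.
 */
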
c64e148a
VN
4089/**
4090 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4091 * @buffer: The ring buffer
4092 * @cpu: The per CPU buffer to read from.
4093 */
13292494 4094u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
c64e148a
VN
4095{
4096 unsigned long flags;
4097 struct ring_buffer_per_cpu *cpu_buffer;
4098 struct buffer_page *bpage;
da830e58 4099 u64 ret = 0;
c64e148a
VN
4100
4101 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4102 return 0;
4103
4104 cpu_buffer = buffer->buffers[cpu];
7115e3fc 4105 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
c64e148a
VN
4106 /*
4107 * if the tail is on reader_page, oldest time stamp is on the reader
4108 * page
4109 */
4110 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4111 bpage = cpu_buffer->reader_page;
4112 else
4113 bpage = rb_set_head_page(cpu_buffer);
54f7be5b
SR
4114 if (bpage)
4115 ret = bpage->page->time_stamp;
7115e3fc 4116 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
c64e148a
VN
4117
4118 return ret;
4119}
4120EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4121
4122/**
45d99ea4 4123 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
c64e148a
VN
4124 * @buffer: The ring buffer
4125 * @cpu: The per CPU buffer to read from.
4126 */
13292494 4127unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
c64e148a
VN
4128{
4129 struct ring_buffer_per_cpu *cpu_buffer;
4130 unsigned long ret;
4131
4132 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4133 return 0;
4134
4135 cpu_buffer = buffer->buffers[cpu];
4136 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4137
4138 return ret;
4139}
4140EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4141
7a8e76a3
SR
4142/**
4143 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4144 * @buffer: The ring buffer
4145 * @cpu: The per CPU buffer to get the entries from.
4146 */
13292494 4147unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4148{
4149 struct ring_buffer_per_cpu *cpu_buffer;
4150
9e01c1b7 4151 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4152 return 0;
7a8e76a3
SR
4153
4154 cpu_buffer = buffer->buffers[cpu];
554f786e 4155
f6195aa0 4156 return rb_num_of_entries(cpu_buffer);
7a8e76a3 4157}
c4f50183 4158EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
7a8e76a3
SR
4159
4160/**
884bfe89
SP
4161 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4162 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
7a8e76a3
SR
4163 * @buffer: The ring buffer
4164 * @cpu: The per CPU buffer to get the number of overruns from
4165 */
13292494 4166unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4167{
4168 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 4169 unsigned long ret;
7a8e76a3 4170
9e01c1b7 4171 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4172 return 0;
7a8e76a3
SR
4173
4174 cpu_buffer = buffer->buffers[cpu];
77ae365e 4175 ret = local_read(&cpu_buffer->overrun);
554f786e
SR
4176
4177 return ret;
7a8e76a3 4178}
c4f50183 4179EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
7a8e76a3 4180
f0d2c681 4181/**
884bfe89
SP
4182 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4183 * commits failing due to the buffer wrapping around while there are uncommitted
4184 * events, such as during an interrupt storm.
f0d2c681
SR
4185 * @buffer: The ring buffer
4186 * @cpu: The per CPU buffer to get the number of overruns from
4187 */
4188unsigned long
13292494 4189ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
f0d2c681
SR
4190{
4191 struct ring_buffer_per_cpu *cpu_buffer;
4192 unsigned long ret;
4193
4194 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4195 return 0;
4196
4197 cpu_buffer = buffer->buffers[cpu];
77ae365e 4198 ret = local_read(&cpu_buffer->commit_overrun);
f0d2c681
SR
4199
4200 return ret;
4201}
4202EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4203
884bfe89
SP
4204/**
4205 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4206 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4207 * @buffer: The ring buffer
4208 * @cpu: The per CPU buffer to get the number of overruns from
4209 */
4210unsigned long
13292494 4211ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
884bfe89
SP
4212{
4213 struct ring_buffer_per_cpu *cpu_buffer;
4214 unsigned long ret;
4215
4216 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4217 return 0;
4218
4219 cpu_buffer = buffer->buffers[cpu];
4220 ret = local_read(&cpu_buffer->dropped_events);
4221
4222 return ret;
4223}
4224EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4225
ad964704
SRRH
4226/**
4227 * ring_buffer_read_events_cpu - get the number of events successfully read
4228 * @buffer: The ring buffer
4229 * @cpu: The per CPU buffer to get the number of events read
4230 */
4231unsigned long
13292494 4232ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
ad964704
SRRH
4233{
4234 struct ring_buffer_per_cpu *cpu_buffer;
4235
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4237 return 0;
4238
4239 cpu_buffer = buffer->buffers[cpu];
4240 return cpu_buffer->read;
4241}
4242EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4243
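/*
 * Editor's sketch (hypothetical, not part of the original file): the
 * per-CPU statistics accessors above combine naturally into a simple
 * diagnostic dump.
 */
static void example_dump_stats(struct trace_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu) {
		pr_info("cpu%d: entries=%lu overrun=%lu commit_overrun=%lu read=%lu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_commit_overrun_cpu(buffer, cpu),
			ring_buffer_read_events_cpu(buffer, cpu));
	}
}
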
7a8e76a3
SR
4244/**
4245 * ring_buffer_entries - get the number of entries in a buffer
4246 * @buffer: The ring buffer
4247 *
4248 * Returns the total number of entries in the ring buffer
4249 * (all CPU entries)
4250 */
13292494 4251unsigned long ring_buffer_entries(struct trace_buffer *buffer)
7a8e76a3
SR
4252{
4253 struct ring_buffer_per_cpu *cpu_buffer;
4254 unsigned long entries = 0;
4255 int cpu;
4256
4257 /* if you care about this being correct, lock the buffer */
4258 for_each_buffer_cpu(buffer, cpu) {
4259 cpu_buffer = buffer->buffers[cpu];
f6195aa0 4260 entries += rb_num_of_entries(cpu_buffer);
7a8e76a3
SR
4261 }
4262
4263 return entries;
4264}
c4f50183 4265EXPORT_SYMBOL_GPL(ring_buffer_entries);
7a8e76a3
SR
4266
4267/**
67b394f7 4268 * ring_buffer_overruns - get the number of overruns in buffer
7a8e76a3
SR
4269 * @buffer: The ring buffer
4270 *
4271 * Returns the total number of overruns in the ring buffer
4272 * (all CPU entries)
4273 */
13292494 4274unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
7a8e76a3
SR
4275{
4276 struct ring_buffer_per_cpu *cpu_buffer;
4277 unsigned long overruns = 0;
4278 int cpu;
4279
4280 /* if you care about this being correct, lock the buffer */
4281 for_each_buffer_cpu(buffer, cpu) {
4282 cpu_buffer = buffer->buffers[cpu];
77ae365e 4283 overruns += local_read(&cpu_buffer->overrun);
7a8e76a3
SR
4284 }
4285
4286 return overruns;
4287}
c4f50183 4288EXPORT_SYMBOL_GPL(ring_buffer_overruns);
7a8e76a3 4289
642edba5 4290static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
4291{
4292 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4293
d769041f 4294 /* Iterator usage is expected to have record disabled */
651e22f2
SRRH
4295 iter->head_page = cpu_buffer->reader_page;
4296 iter->head = cpu_buffer->reader_page->read;
785888c5 4297 iter->next_event = iter->head;
651e22f2
SRRH
4298
4299 iter->cache_reader_page = iter->head_page;
24607f11 4300 iter->cache_read = cpu_buffer->read;
2d093282 4301 iter->cache_pages_removed = cpu_buffer->pages_removed;
651e22f2 4302
28e3fc56 4303 if (iter->head) {
d769041f 4304 iter->read_stamp = cpu_buffer->read_stamp;
28e3fc56
SRV
4305 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4306 } else {
abc9b56d 4307 iter->read_stamp = iter->head_page->page->time_stamp;
28e3fc56
SRV
4308 iter->page_stamp = iter->read_stamp;
4309 }
642edba5 4310}
f83c9d0f 4311
642edba5
SR
4312/**
4313 * ring_buffer_iter_reset - reset an iterator
4314 * @iter: The iterator to reset
4315 *
4316 * Resets the iterator, so that it will start from the beginning
4317 * again.
4318 */
4319void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4320{
554f786e 4321 struct ring_buffer_per_cpu *cpu_buffer;
642edba5
SR
4322 unsigned long flags;
4323
554f786e
SR
4324 if (!iter)
4325 return;
4326
4327 cpu_buffer = iter->cpu_buffer;
4328
5389f6fa 4329 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
642edba5 4330 rb_iter_reset(iter);
5389f6fa 4331 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 4332}
c4f50183 4333EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
7a8e76a3
SR
4334
4335/**
4336 * ring_buffer_iter_empty - check if an iterator has no more to read
4337 * @iter: The iterator to check
4338 */
4339int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4340{
4341 struct ring_buffer_per_cpu *cpu_buffer;
78f7a45d
SRV
4342 struct buffer_page *reader;
4343 struct buffer_page *head_page;
4344 struct buffer_page *commit_page;
ead6ecfd 4345 struct buffer_page *curr_commit_page;
78f7a45d 4346 unsigned commit;
ead6ecfd
SRV
4347 u64 curr_commit_ts;
4348 u64 commit_ts;
7a8e76a3
SR
4349
4350 cpu_buffer = iter->cpu_buffer;
78f7a45d
SRV
4351 reader = cpu_buffer->reader_page;
4352 head_page = cpu_buffer->head_page;
4353 commit_page = cpu_buffer->commit_page;
ead6ecfd
SRV
4354 commit_ts = commit_page->page->time_stamp;
4355
4356 /*
4357 * When the writer goes across pages, it issues a cmpxchg which
4358 * is a mb(), which will synchronize with the rmb here.
4359 * (see rb_tail_page_update())
4360 */
4361 smp_rmb();
78f7a45d 4362 commit = rb_page_commit(commit_page);
ead6ecfd
SRV
4363 /* We want to make sure that the commit page doesn't change */
4364 smp_rmb();
4365
4366 /* Make sure commit page didn't change */
4367 curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4368 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4369
4370 /* If the commit page changed, then there's more data */
4371 if (curr_commit_page != commit_page ||
4372 curr_commit_ts != commit_ts)
4373 return 0;
78f7a45d 4374
ead6ecfd 4375 /* Still racy, as it may return a false positive, but that's OK */
785888c5 4376 return ((iter->head_page == commit_page && iter->head >= commit) ||
78f7a45d
SRV
4377 (iter->head_page == reader && commit_page == head_page &&
4378 head_page->read == commit &&
4379 iter->head == rb_page_commit(cpu_buffer->reader_page)));
7a8e76a3 4380}
c4f50183 4381EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
7a8e76a3
SR
4382
4383static void
4384rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4385 struct ring_buffer_event *event)
4386{
4387 u64 delta;
4388
334d4169 4389 switch (event->type_len) {
7a8e76a3
SR
4390 case RINGBUF_TYPE_PADDING:
4391 return;
4392
4393 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 4394 delta = rb_event_time_stamp(event);
7a8e76a3
SR
4395 cpu_buffer->read_stamp += delta;
4396 return;
4397
4398 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 4399 delta = rb_event_time_stamp(event);
6695da58 4400 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
dc4e2801 4401 cpu_buffer->read_stamp = delta;
7a8e76a3
SR
4402 return;
4403
4404 case RINGBUF_TYPE_DATA:
4405 cpu_buffer->read_stamp += event->time_delta;
4406 return;
4407
4408 default:
da4d401a 4409 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3 4410 }
7a8e76a3
SR
4411}
4412
4413static void
4414rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4415 struct ring_buffer_event *event)
4416{
4417 u64 delta;
4418
334d4169 4419 switch (event->type_len) {
7a8e76a3
SR
4420 case RINGBUF_TYPE_PADDING:
4421 return;
4422
4423 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 4424 delta = rb_event_time_stamp(event);
7a8e76a3
SR
4425 iter->read_stamp += delta;
4426 return;
4427
4428 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 4429 delta = rb_event_time_stamp(event);
6695da58 4430 delta = rb_fix_abs_ts(delta, iter->read_stamp);
dc4e2801 4431 iter->read_stamp = delta;
7a8e76a3
SR
4432 return;
4433
4434 case RINGBUF_TYPE_DATA:
4435 iter->read_stamp += event->time_delta;
4436 return;
4437
4438 default:
da4d401a 4439 RB_WARN_ON(iter->cpu_buffer, 1);
7a8e76a3 4440 }
7a8e76a3
SR
4441}
4442
d769041f
SR
4443static struct buffer_page *
4444rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 4445{
d769041f 4446 struct buffer_page *reader = NULL;
139f8400 4447 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
66a8cb95 4448 unsigned long overwrite;
d769041f 4449 unsigned long flags;
818e3dd3 4450 int nr_loops = 0;
bc92b956 4451 bool ret;
d769041f 4452
3e03fb7f 4453 local_irq_save(flags);
0199c4e6 4454 arch_spin_lock(&cpu_buffer->lock);
d769041f
SR
4455
4456 again:
818e3dd3
SR
4457 /*
4458 * This should normally only loop twice. But because the
4459 * start of the reader inserts an empty page, it causes
4460 * a case where we will loop three times. There should be no
4461 * reason to loop four times (that I know of).
4462 */
3e89c7bb 4463 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
4464 reader = NULL;
4465 goto out;
4466 }
4467
d769041f
SR
4468 reader = cpu_buffer->reader_page;
4469
4470 /* If there's more to read, return this page */
bf41a158 4471 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
4472 goto out;
4473
4474 /* Never should we have an index greater than the size */
3e89c7bb
SR
4475 if (RB_WARN_ON(cpu_buffer,
4476 cpu_buffer->reader_page->read > rb_page_size(reader)))
4477 goto out;
d769041f
SR
4478
4479 /* check if we caught up to the tail */
4480 reader = NULL;
bf41a158 4481 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 4482 goto out;
7a8e76a3 4483
a5fb8331
SR
4484 /* Don't bother swapping if the ring buffer is empty */
4485 if (rb_num_of_entries(cpu_buffer) == 0)
4486 goto out;
4487
7a8e76a3 4488 /*
d769041f 4489 * Reset the reader page to size zero.
7a8e76a3 4490 */
77ae365e
SR
4491 local_set(&cpu_buffer->reader_page->write, 0);
4492 local_set(&cpu_buffer->reader_page->entries, 0);
4493 local_set(&cpu_buffer->reader_page->page->commit, 0);
ff0ff84a 4494 cpu_buffer->reader_page->real_end = 0;
7a8e76a3 4495
77ae365e
SR
4496 spin:
4497 /*
4498 * Splice the empty reader page into the list around the head.
4499 */
4500 reader = rb_set_head_page(cpu_buffer);
54f7be5b
SR
4501 if (!reader)
4502 goto out;
0e1ff5d7 4503 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
d769041f 4504 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158 4505
3adc54fa
SR
4506 /*
4507 * cpu_buffer->pages just needs to point to the buffer, it
4508 * has no specific buffer page to point to. Let's move it out
25985edc 4509 * of our way so we don't accidentally swap it.
3adc54fa
SR
4510 */
4511 cpu_buffer->pages = reader->list.prev;
4512
77ae365e 4513 /* The reader page will be pointing to the new head */
6689bed3 4514 rb_set_list_to_head(&cpu_buffer->reader_page->list);
7a8e76a3 4515
66a8cb95
SR
4516 /*
4517 * We want to make sure we read the overruns after we set up our
4518 * pointers to the next object. The writer side does a
4519 * cmpxchg to cross pages which acts as the mb on the writer
4520 * side. Note, the reader will constantly fail the swap
4521 * while the writer is updating the pointers, so this
4522 * guarantees that the overwrite recorded here is the one we
4523 * want to compare with the last_overrun.
4524 */
4525 smp_mb();
4526 overwrite = local_read(&(cpu_buffer->overrun));
4527
77ae365e
SR
4528 /*
4529 * Here's the tricky part.
4530 *
4531 * We need to move the pointer past the header page.
4532 * But we can only do that if a writer is not currently
4533 * moving it. The page before the header page has the
4534 * flag bit '1' set if it is pointing to the page we want,
4535 * but if the writer is in the process of moving it
4536 * then it will be '2', or '0' if already moved.
4537 */
4538
4539 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
7a8e76a3
SR
4540
4541 /*
77ae365e 4542 * If we did not convert it, then we must try again.
7a8e76a3 4543 */
77ae365e
SR
4544 if (!ret)
4545 goto spin;
7a8e76a3 4546
77ae365e 4547 /*
2c2b0a78 4548 * Yay! We succeeded in replacing the page.
77ae365e
SR
4549 *
4550 * Now make the new head point back to the reader page.
4551 */
5ded3dc6 4552 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
6689bed3 4553 rb_inc_page(&cpu_buffer->head_page);
d769041f 4554
2c2b0a78
SRV
4555 local_inc(&cpu_buffer->pages_read);
4556
d769041f
SR
4557 /* Finally update the reader page to the new head */
4558 cpu_buffer->reader_page = reader;
b81f472a 4559 cpu_buffer->reader_page->read = 0;
d769041f 4560
66a8cb95
SR
4561 if (overwrite != cpu_buffer->last_overrun) {
4562 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4563 cpu_buffer->last_overrun = overwrite;
4564 }
4565
d769041f
SR
4566 goto again;
4567
4568 out:
b81f472a
SRRH
4569 /* Update the read_stamp on the first event */
4570 if (reader && reader->read == 0)
4571 cpu_buffer->read_stamp = reader->page->time_stamp;
4572
0199c4e6 4573 arch_spin_unlock(&cpu_buffer->lock);
3e03fb7f 4574 local_irq_restore(flags);
d769041f 4575
a0fcaaed
SRG
4576 /*
4577 * The writer has preemption disabled; wait for it, but not forever.
4578 * Although, 1 second is pretty much "forever".
4579 */
4580#define USECS_WAIT 1000000
4581 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4582 /* If the write is past the end of page, a writer is still updating it */
139f8400 4583 if (likely(!reader || rb_page_write(reader) <= bsize))
a0fcaaed
SRG
4584 break;
4585
4586 udelay(1);
4587
4588 /* Get the latest version of the reader write value */
4589 smp_rmb();
4590 }
4591
4592 /* The writer is not moving forward? Something is wrong */
4593 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4594 reader = NULL;
4595
4596 /*
4597 * Make sure we see any padding after the write update
6455b616
ZY
4598 * (see rb_reset_tail()).
4599 *
4600 * In addition, a writer may be writing on the reader page
4601 * if the page has not been fully filled, so the read barrier
4602 * is also needed to make sure we see the content of what is
4603 * committed by the writer (see rb_set_commit_to_write()).
a0fcaaed
SRG
4604 */
4605 smp_rmb();
4606
4607
d769041f
SR
4608 return reader;
4609}
4610
4611static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4612{
4613 struct ring_buffer_event *event;
4614 struct buffer_page *reader;
4615 unsigned length;
4616
4617 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 4618
d769041f 4619 /* This function should not be called when buffer is empty */
3e89c7bb
SR
4620 if (RB_WARN_ON(cpu_buffer, !reader))
4621 return;
7a8e76a3 4622
d769041f
SR
4623 event = rb_reader_event(cpu_buffer);
4624
a1863c21 4625 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
e4906eff 4626 cpu_buffer->read++;
d769041f
SR
4627
4628 rb_update_read_stamp(cpu_buffer, event);
4629
4630 length = rb_event_length(event);
6f807acd 4631 cpu_buffer->reader_page->read += length;
45d99ea4 4632 cpu_buffer->read_bytes += length;
7a8e76a3
SR
4633}
4634
4635static void rb_advance_iter(struct ring_buffer_iter *iter)
4636{
7a8e76a3 4637 struct ring_buffer_per_cpu *cpu_buffer;
7a8e76a3
SR
4638
4639 cpu_buffer = iter->cpu_buffer;
7a8e76a3 4640
785888c5
SRV
4641 /* If head == next_event then we need to jump to the next event */
4642 if (iter->head == iter->next_event) {
4643 /* If the event gets overwritten again, there's nothing to do */
4644 if (rb_iter_head_event(iter) == NULL)
4645 return;
4646 }
4647
4648 iter->head = iter->next_event;
4649
7a8e76a3
SR
4650 /*
4651 * Check if we are at the end of the buffer.
4652 */
785888c5 4653 if (iter->next_event >= rb_page_size(iter->head_page)) {
ea05b57c
SR
4654 /* discarded commits can make the page empty */
4655 if (iter->head_page == cpu_buffer->commit_page)
3e89c7bb 4656 return;
d769041f 4657 rb_inc_iter(iter);
7a8e76a3
SR
4658 return;
4659 }
4660
785888c5 4661 rb_update_iter_read_stamp(iter, iter->event);
7a8e76a3
SR
4662}
4663
66a8cb95
SR
4664static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4665{
4666 return cpu_buffer->lost_events;
4667}
4668
f83c9d0f 4669static struct ring_buffer_event *
66a8cb95
SR
4670rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4671 unsigned long *lost_events)
7a8e76a3 4672{
7a8e76a3 4673 struct ring_buffer_event *event;
d769041f 4674 struct buffer_page *reader;
818e3dd3 4675 int nr_loops = 0;
7a8e76a3 4676
dc4e2801
TZ
4677 if (ts)
4678 *ts = 0;
7a8e76a3 4679 again:
818e3dd3 4680 /*
69d1b839
SR
4681 * We repeat when a time extend is encountered.
4682 * Since the time extend is always attached to a data event,
4683 * we should never loop more than once.
4684 * (We never hit the following condition more than twice).
818e3dd3 4685 */
69d1b839 4686 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
818e3dd3 4687 return NULL;
818e3dd3 4688
d769041f
SR
4689 reader = rb_get_reader_page(cpu_buffer);
4690 if (!reader)
7a8e76a3
SR
4691 return NULL;
4692
d769041f 4693 event = rb_reader_event(cpu_buffer);
7a8e76a3 4694
334d4169 4695 switch (event->type_len) {
7a8e76a3 4696 case RINGBUF_TYPE_PADDING:
2d622719
TZ
4697 if (rb_null_event(event))
4698 RB_WARN_ON(cpu_buffer, 1);
4699 /*
4700 * Because the writer could be discarding every
4701 * event it creates (which would probably be bad)
4702 * if we were to go back to "again" then we may never
4703 * catch up, and will trigger the warn on, or lock
4704 * the box. Return the padding, and we will release
4705 * the current locks, and try again.
4706 */
2d622719 4707 return event;
7a8e76a3
SR
4708
4709 case RINGBUF_TYPE_TIME_EXTEND:
4710 /* Internal data, OK to advance */
d769041f 4711 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
4712 goto again;
4713
4714 case RINGBUF_TYPE_TIME_STAMP:
dc4e2801 4715 if (ts) {
e20044f7 4716 *ts = rb_event_time_stamp(event);
6695da58 4717 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
dc4e2801
TZ
4718 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4719 cpu_buffer->cpu, ts);
4720 }
4721 /* Internal data, OK to advance */
d769041f 4722 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
4723 goto again;
4724
4725 case RINGBUF_TYPE_DATA:
dc4e2801 4726 if (ts && !(*ts)) {
7a8e76a3 4727 *ts = cpu_buffer->read_stamp + event->time_delta;
d8eeb2d3 4728 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
37886f6a 4729 cpu_buffer->cpu, ts);
7a8e76a3 4730 }
66a8cb95
SR
4731 if (lost_events)
4732 *lost_events = rb_lost_events(cpu_buffer);
7a8e76a3
SR
4733 return event;
4734
4735 default:
da4d401a 4736 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3
SR
4737 }
4738
4739 return NULL;
4740}
c4f50183 4741EXPORT_SYMBOL_GPL(ring_buffer_peek);
7a8e76a3 4742
f83c9d0f
SR
4743static struct ring_buffer_event *
4744rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3 4745{
13292494 4746 struct trace_buffer *buffer;
7a8e76a3
SR
4747 struct ring_buffer_per_cpu *cpu_buffer;
4748 struct ring_buffer_event *event;
818e3dd3 4749 int nr_loops = 0;
7a8e76a3 4750
dc4e2801
TZ
4751 if (ts)
4752 *ts = 0;
4753
7a8e76a3
SR
4754 cpu_buffer = iter->cpu_buffer;
4755 buffer = cpu_buffer->buffer;
4756
492a74f4 4757 /*
2d093282
ZY
4758 * Check if someone performed a consuming read to the buffer
4759 * or removed some pages from the buffer. In these cases,
4760 * the iterator was invalidated and we need to reset it.
492a74f4
SR
4761 */
4762 if (unlikely(iter->cache_read != cpu_buffer->read ||
2d093282
ZY
4763 iter->cache_reader_page != cpu_buffer->reader_page ||
4764 iter->cache_pages_removed != cpu_buffer->pages_removed))
492a74f4
SR
4765 rb_iter_reset(iter);
4766
7a8e76a3 4767 again:
3c05d748
SR
4768 if (ring_buffer_iter_empty(iter))
4769 return NULL;
4770
818e3dd3 4771 /*
3d2353de
SRV
4772 * As the writer can mess with what the iterator is trying
4773 * to read, just give up if we fail to get an event after
4774 * three tries. The iterator is not as reliable when reading
4775 * the ring buffer with an active write as the consumer is.
4776 * Do not warn if the three-failure limit is reached.
818e3dd3 4777 */
3d2353de 4778 if (++nr_loops > 3)
818e3dd3 4779 return NULL;
818e3dd3 4780
7a8e76a3
SR
4781 if (rb_per_cpu_empty(cpu_buffer))
4782 return NULL;
4783
10e83fd0 4784 if (iter->head >= rb_page_size(iter->head_page)) {
3c05d748
SR
4785 rb_inc_iter(iter);
4786 goto again;
4787 }
4788
7a8e76a3 4789 event = rb_iter_head_event(iter);
3d2353de 4790 if (!event)
785888c5 4791 goto again;
7a8e76a3 4792
334d4169 4793 switch (event->type_len) {
7a8e76a3 4794 case RINGBUF_TYPE_PADDING:
2d622719
TZ
4795 if (rb_null_event(event)) {
4796 rb_inc_iter(iter);
4797 goto again;
4798 }
4799 rb_advance_iter(iter);
4800 return event;
7a8e76a3
SR
4801
4802 case RINGBUF_TYPE_TIME_EXTEND:
4803 /* Internal data, OK to advance */
4804 rb_advance_iter(iter);
4805 goto again;
4806
4807 case RINGBUF_TYPE_TIME_STAMP:
dc4e2801 4808 if (ts) {
e20044f7 4809 *ts = rb_event_time_stamp(event);
6695da58 4810 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
dc4e2801
TZ
4811 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4812 cpu_buffer->cpu, ts);
4813 }
4814 /* Internal data, OK to advance */
7a8e76a3
SR
4815 rb_advance_iter(iter);
4816 goto again;
4817
4818 case RINGBUF_TYPE_DATA:
dc4e2801 4819 if (ts && !(*ts)) {
7a8e76a3 4820 *ts = iter->read_stamp + event->time_delta;
37886f6a
SR
4821 ring_buffer_normalize_time_stamp(buffer,
4822 cpu_buffer->cpu, ts);
7a8e76a3
SR
4823 }
4824 return event;
4825
4826 default:
da4d401a 4827 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3
SR
4828 }
4829
4830 return NULL;
4831}
c4f50183 4832EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
7a8e76a3 4833
289a5a25 4834static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
8d707e8e 4835{
289a5a25
SRRH
4836 if (likely(!in_nmi())) {
4837 raw_spin_lock(&cpu_buffer->reader_lock);
4838 return true;
4839 }
4840
8d707e8e
SR
4841 /*
4842 * If an NMI die dumps out the content of the ring buffer,
289a5a25
SRRH
4843 * trylock must be used to prevent a deadlock if the NMI
4844 * preempted a task that holds the ring buffer locks. If
4845 * we get the lock then all is fine; if not, then continue
4846 * to do the read, but this can corrupt the ring buffer,
4847 * so it must be permanently disabled from future writes.
4848 * Reading from NMI is a one-shot deal.
8d707e8e 4849 */
289a5a25
SRRH
4850 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4851 return true;
8d707e8e 4852
289a5a25
SRRH
4853 /* Continue without locking, but disable the ring buffer */
4854 atomic_inc(&cpu_buffer->record_disabled);
4855 return false;
4856}
4857
4858static inline void
4859rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4860{
4861 if (likely(locked))
4862 raw_spin_unlock(&cpu_buffer->reader_lock);
8d707e8e
SR
4863}
4864
f83c9d0f
SR
4865/**
4866 * ring_buffer_peek - peek at the next event to be read
4867 * @buffer: The ring buffer to read
4868 * @cpu: The cpu to peek at
4869 * @ts: The timestamp counter of this event.
66a8cb95 4870 * @lost_events: a variable to store if events were lost (may be NULL)
f83c9d0f
SR
4871 *
4872 * This will return the event that will be read next, but does
4873 * not consume the data.
4874 */
4875struct ring_buffer_event *
13292494 4876ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
66a8cb95 4877 unsigned long *lost_events)
f83c9d0f
SR
4878{
4879 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
8aabee57 4880 struct ring_buffer_event *event;
f83c9d0f 4881 unsigned long flags;
289a5a25 4882 bool dolock;
f83c9d0f 4883
554f786e 4884 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4885 return NULL;
554f786e 4886
2d622719 4887 again:
8d707e8e 4888 local_irq_save(flags);
289a5a25 4889 dolock = rb_reader_lock(cpu_buffer);
66a8cb95 4890 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
469535a5
RR
4891 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4892 rb_advance_reader(cpu_buffer);
289a5a25 4893 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 4894 local_irq_restore(flags);
f83c9d0f 4895
1b959e18 4896 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4897 goto again;
2d622719 4898
f83c9d0f
SR
4899 return event;
4900}
4901
c9b7a4a7
SRV
4902/**
 * ring_buffer_iter_dropped - report if there are dropped events
4903 * @iter: The ring buffer iterator
4904 *
4905 * Returns true if there were dropped events since the last peek.
4906 */
4907bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4908{
4909 bool ret = iter->missed_events != 0;
4910
4911 iter->missed_events = 0;
4912 return ret;
4913}
4914EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4915
f83c9d0f
SR
4916/**
4917 * ring_buffer_iter_peek - peek at the next event to be read
4918 * @iter: The ring buffer iterator
4919 * @ts: The timestamp counter of this event.
4920 *
4921 * This will return the event that will be read next, but does
4922 * not increment the iterator.
4923 */
4924struct ring_buffer_event *
4925ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4926{
4927 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4928 struct ring_buffer_event *event;
4929 unsigned long flags;
4930
2d622719 4931 again:
5389f6fa 4932 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
f83c9d0f 4933 event = rb_iter_peek(iter, ts);
5389f6fa 4934 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
f83c9d0f 4935
1b959e18 4936 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4937 goto again;
2d622719 4938
f83c9d0f
SR
4939 return event;
4940}
4941
7a8e76a3
SR
4942/**
4943 * ring_buffer_consume - return an event and consume it
4944 * @buffer: The ring buffer to get the next event from
66a8cb95
SR
4945 * @cpu: the cpu to read the buffer from
4946 * @ts: a variable to store the timestamp (may be NULL)
4947 * @lost_events: a variable to store if events were lost (may be NULL)
7a8e76a3
SR
4948 *
4949 * Returns the next event in the ring buffer, and that event is consumed.
4950 * Meaning, that sequential reads will keep returning a different event,
4951 * and eventually empty the ring buffer if the producer is slower.
4952 */
4953struct ring_buffer_event *
13292494 4954ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
66a8cb95 4955 unsigned long *lost_events)
7a8e76a3 4956{
554f786e
SR
4957 struct ring_buffer_per_cpu *cpu_buffer;
4958 struct ring_buffer_event *event = NULL;
f83c9d0f 4959 unsigned long flags;
289a5a25 4960 bool dolock;
7a8e76a3 4961
2d622719 4962 again:
554f786e
SR
4963 /* might be called in atomic */
4964 preempt_disable();
4965
9e01c1b7 4966 if (!cpumask_test_cpu(cpu, buffer->cpumask))
554f786e 4967 goto out;
7a8e76a3 4968
554f786e 4969 cpu_buffer = buffer->buffers[cpu];
8d707e8e 4970 local_irq_save(flags);
289a5a25 4971 dolock = rb_reader_lock(cpu_buffer);
f83c9d0f 4972
66a8cb95
SR
4973 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4974 if (event) {
4975 cpu_buffer->lost_events = 0;
469535a5 4976 rb_advance_reader(cpu_buffer);
66a8cb95 4977 }
7a8e76a3 4978
289a5a25 4979 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 4980 local_irq_restore(flags);
f83c9d0f 4981
554f786e
SR
4982 out:
4983 preempt_enable();
4984
1b959e18 4985 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4986 goto again;
2d622719 4987
7a8e76a3
SR
4988 return event;
4989}
c4f50183 4990EXPORT_SYMBOL_GPL(ring_buffer_consume);
7a8e76a3
SR
4991
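/*
 * Editor's sketch of a consuming read loop (hypothetical example, not
 * part of the original file): drain everything currently readable on
 * one CPU, reporting events lost to overruns. Assumes the events were
 * written with a single u64 payload.
 */
static void example_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		u64 *val = ring_buffer_event_data(event);

		pr_info("cpu%d: ts=%llu lost=%lu val=%llu\n",
			cpu, ts, lost, *val);
	}
}
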
4992/**
72c9ddfd 4993 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
7a8e76a3
SR
4994 * @buffer: The ring buffer to read from
4995 * @cpu: The cpu buffer to iterate over
31b265b3 4996 * @flags: gfp flags to use for memory allocation
7a8e76a3 4997 *
72c9ddfd
DM
4998 * This performs the initial preparations necessary to iterate
4999 * through the buffer. Memory is allocated, buffer recording
5000 * is disabled, and the iterator pointer is returned to the caller.
7a8e76a3 5001 *
6167c205 5002 * Disabling buffer recording prevents the reading from being
72c9ddfd
DM
5003 * corrupted. This is not a consuming read, so a producer is not
5004 * expected.
5005 *
5006 * After a sequence of ring_buffer_read_prepare calls, the user is
d611851b 5007 * expected to make at least one call to ring_buffer_read_prepare_sync.
72c9ddfd
DM
5008 * Afterwards, ring_buffer_read_start is invoked to get things going
5009 * for real.
5010 *
d611851b 5011 * This overall must be paired with ring_buffer_read_finish.
7a8e76a3
SR
5012 */
5013struct ring_buffer_iter *
13292494 5014ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
7a8e76a3
SR
5015{
5016 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 5017 struct ring_buffer_iter *iter;
7a8e76a3 5018
9e01c1b7 5019 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 5020 return NULL;
7a8e76a3 5021
785888c5 5022 iter = kzalloc(sizeof(*iter), flags);
7a8e76a3 5023 if (!iter)
8aabee57 5024 return NULL;
7a8e76a3 5025
b0495258 5026 /* Holds the entire event: data and meta data */
139f8400
TSV
5027 iter->event_size = buffer->subbuf_size;
5028 iter->event = kmalloc(iter->event_size, flags);
785888c5
SRV
5029 if (!iter->event) {
5030 kfree(iter);
5031 return NULL;
5032 }
5033
7a8e76a3
SR
5034 cpu_buffer = buffer->buffers[cpu];
5035
5036 iter->cpu_buffer = cpu_buffer;
5037
07b8b10e 5038 atomic_inc(&cpu_buffer->resize_disabled);
72c9ddfd
DM
5039
5040 return iter;
5041}
5042EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5043
5044/**
5045 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5046 *
5047 * All previously invoked ring_buffer_read_prepare calls to prepare
5048 * iterators will be synchronized. Afterwards, ring_buffer_read_start
5049 * calls on those iterators are allowed.
5050 */
5051void
5052ring_buffer_read_prepare_sync(void)
5053{
74401729 5054 synchronize_rcu();
72c9ddfd
DM
5055}
5056EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5057
5058/**
5059 * ring_buffer_read_start - start a non consuming read of the buffer
5060 * @iter: The iterator returned by ring_buffer_read_prepare
5061 *
5062 * This finalizes the startup of an iteration through the buffer.
5063 * The iterator comes from a call to ring_buffer_read_prepare and
5064 * an intervening ring_buffer_read_prepare_sync must have been
5065 * performed.
5066 *
d611851b 5067 * Must be paired with ring_buffer_read_finish.
72c9ddfd
DM
5068 */
5069void
5070ring_buffer_read_start(struct ring_buffer_iter *iter)
5071{
5072 struct ring_buffer_per_cpu *cpu_buffer;
5073 unsigned long flags;
5074
5075 if (!iter)
5076 return;
5077
5078 cpu_buffer = iter->cpu_buffer;
7a8e76a3 5079
5389f6fa 5080 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
0199c4e6 5081 arch_spin_lock(&cpu_buffer->lock);
642edba5 5082 rb_iter_reset(iter);
0199c4e6 5083 arch_spin_unlock(&cpu_buffer->lock);
5389f6fa 5084 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 5085}
c4f50183 5086EXPORT_SYMBOL_GPL(ring_buffer_read_start);
7a8e76a3
SR
5087
5088/**
d611851b 5089 * ring_buffer_read_finish - finish reading the iterator of the buffer
7a8e76a3
SR
5090 * @iter: The iterator retrieved by ring_buffer_start
5091 *
5092 * This re-enables the recording to the buffer, and frees the
5093 * iterator.
5094 */
5095void
5096ring_buffer_read_finish(struct ring_buffer_iter *iter)
5097{
5098 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
9366c1ba 5099 unsigned long flags;
7a8e76a3 5100
659f451f
SR
5101 /*
5102 * Ring buffer is disabled from recording, here's a good place
9366c1ba
SR
5103 * to check the integrity of the ring buffer.
5104 * Must prevent readers from trying to read, as the check
5105 * clears the HEAD page and readers require it.
659f451f 5106 */
9366c1ba 5107 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
659f451f 5108 rb_check_pages(cpu_buffer);
9366c1ba 5109 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
659f451f 5110
07b8b10e 5111 atomic_dec(&cpu_buffer->resize_disabled);
785888c5 5112 kfree(iter->event);
7a8e76a3
SR
5113 kfree(iter);
5114}
c4f50183 5115EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
7a8e76a3
SR
5116
5117/**
bc1a72af 5118 * ring_buffer_iter_advance - advance the iterator to the next location
7a8e76a3 5119 * @iter: The ring buffer iterator
7a8e76a3 5120 *
bc1a72af
SRV
5121 * Move the location of the iterator such that the next read will
5122 * be the next location of the iterator.
7a8e76a3 5123 */
bc1a72af 5124void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
7a8e76a3 5125{
f83c9d0f
SR
5126 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5127 unsigned long flags;
7a8e76a3 5128
5389f6fa 5129 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7e9391cf 5130
7a8e76a3
SR
5131 rb_advance_iter(iter);
5132
bc1a72af 5133 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 5134}
bc1a72af 5135EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
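/*
 * For example, a complete non-consuming read of one CPU buffer using
 * the iterator API above (an illustrative sketch, not part of this
 * file; "process_event" is a hypothetical helper and error handling
 * is abbreviated):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	if (ring_buffer_iter_dropped(iter))
 *		pr_info("events were dropped while iterating\n");
 *
 *	ring_buffer_read_finish(iter);
 */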
7a8e76a3
SR
5136
5137/**
5138 * ring_buffer_size - return the size of the ring buffer (in bytes)
5139 * @buffer: The ring buffer.
59e7cffe 5140 * @cpu: The CPU to get ring buffer size from.
7a8e76a3 5141 */
13292494 5142unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
7a8e76a3 5143{
438ced17
VN
5144 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5145 return 0;
5146
139f8400 5147 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
7a8e76a3 5148}
c4f50183 5149EXPORT_SYMBOL_GPL(ring_buffer_size);
7a8e76a3 5150
8ec90be7
SRG
5151/**
5152 * ring_buffer_max_event_size - return the max data size of an event
5153 * @buffer: The ring buffer.
5154 *
5155 * Returns the maximum size an event can be.
5156 */
5157unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5158{
5159 /* If abs timestamp is requested, events have a timestamp too */
5160 if (ring_buffer_time_stamp_abs(buffer))
139f8400
TSV
5161 return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5162 return buffer->max_data_size;
8ec90be7
SRG
5163}
5164EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
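/*
 * For example, the total data capacity of the buffer is just the
 * per-CPU sizes summed, and ring_buffer_max_event_size() bounds what a
 * single reserve may ask for (an illustrative sketch, not part of this
 * file):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 *
 *	if (len > ring_buffer_max_event_size(buffer))
 *		return -EINVAL;
 */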
5165
7e42907f
ZY
5166static void rb_clear_buffer_page(struct buffer_page *page)
5167{
5168 local_set(&page->write, 0);
5169 local_set(&page->entries, 0);
5170 rb_init_page(page->page);
5171 page->read = 0;
5172}
5173
7a8e76a3
SR
5174static void
5175rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5176{
7e42907f
ZY
5177 struct buffer_page *page;
5178
77ae365e
SR
5179 rb_head_page_deactivate(cpu_buffer);
5180
7a8e76a3 5181 cpu_buffer->head_page
3adc54fa 5182 = list_entry(cpu_buffer->pages, struct buffer_page, list);
7e42907f
ZY
5183 rb_clear_buffer_page(cpu_buffer->head_page);
5184 list_for_each_entry(page, cpu_buffer->pages, list) {
5185 rb_clear_buffer_page(page);
5186 }
bf41a158
SR
5187
5188 cpu_buffer->tail_page = cpu_buffer->head_page;
5189 cpu_buffer->commit_page = cpu_buffer->head_page;
5190
5191 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5040b4b7 5192 INIT_LIST_HEAD(&cpu_buffer->new_pages);
7e42907f 5193 rb_clear_buffer_page(cpu_buffer->reader_page);
7a8e76a3 5194
c64e148a 5195 local_set(&cpu_buffer->entries_bytes, 0);
77ae365e 5196 local_set(&cpu_buffer->overrun, 0);
884bfe89
SP
5197 local_set(&cpu_buffer->commit_overrun, 0);
5198 local_set(&cpu_buffer->dropped_events, 0);
e4906eff 5199 local_set(&cpu_buffer->entries, 0);
fa743953
SR
5200 local_set(&cpu_buffer->committing, 0);
5201 local_set(&cpu_buffer->commits, 0);
2c2b0a78 5202 local_set(&cpu_buffer->pages_touched, 0);
31029a8b 5203 local_set(&cpu_buffer->pages_lost, 0);
2c2b0a78 5204 local_set(&cpu_buffer->pages_read, 0);
03329f99 5205 cpu_buffer->last_pages_touch = 0;
2c2b0a78 5206 cpu_buffer->shortest_full = 0;
77ae365e 5207 cpu_buffer->read = 0;
c64e148a 5208 cpu_buffer->read_bytes = 0;
69507c06 5209
10464b4a
SRV
5210 rb_time_set(&cpu_buffer->write_stamp, 0);
5211 rb_time_set(&cpu_buffer->before_stamp, 0);
77ae365e 5212
8672e494
SRV
5213 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5214
66a8cb95
SR
5215 cpu_buffer->lost_events = 0;
5216 cpu_buffer->last_overrun = 0;
5217
77ae365e 5218 rb_head_page_activate(cpu_buffer);
2d093282 5219 cpu_buffer->pages_removed = 0;
7a8e76a3
SR
5220}
5221
b23d7a5f
NP
5222/* Must have disabled the cpu buffer then done a synchronize_rcu */
5223static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5224{
5225 unsigned long flags;
5226
5227 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5228
5229 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5230 goto out;
5231
5232 arch_spin_lock(&cpu_buffer->lock);
5233
5234 rb_reset_cpu(cpu_buffer);
5235
5236 arch_spin_unlock(&cpu_buffer->lock);
5237
5238 out:
5239 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5240}
5241
7a8e76a3
SR
5242/**
5243 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5244 * @buffer: The ring buffer to reset a per cpu buffer of
5245 * @cpu: The CPU buffer to be reset
5246 */
13292494 5247void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
5248{
5249 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
7a8e76a3 5250
9e01c1b7 5251 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 5252 return;
7a8e76a3 5253
bbeb9746
GK
5254 /* prevent another thread from changing buffer sizes */
5255 mutex_lock(&buffer->mutex);
5256
07b8b10e 5257 atomic_inc(&cpu_buffer->resize_disabled);
41ede23e
SR
5258 atomic_inc(&cpu_buffer->record_disabled);
5259
83f40318 5260 /* Make sure all commits have finished */
74401729 5261 synchronize_rcu();
83f40318 5262
b23d7a5f 5263 reset_disabled_cpu_buffer(cpu_buffer);
f83c9d0f 5264
b23d7a5f
NP
5265 atomic_dec(&cpu_buffer->record_disabled);
5266 atomic_dec(&cpu_buffer->resize_disabled);
bbeb9746
GK
5267
5268 mutex_unlock(&buffer->mutex);
b23d7a5f
NP
5269}
5270EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
41b6a95d 5271
7c339fb4
TW
5272/* Flag to ensure proper resetting of atomic variables */
5273#define RESET_BIT (1 << 30)
5274
b23d7a5f 5275/**
b7085b6f 5276 * ring_buffer_reset_online_cpus - reset the online per-CPU buffers of a ring buffer
b23d7a5f 5277 * @buffer: The ring buffer to reset a per cpu buffer of
b23d7a5f
NP
5278 */
5279void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5280{
5281 struct ring_buffer_per_cpu *cpu_buffer;
5282 int cpu;
7a8e76a3 5283
bbeb9746
GK
5284 /* prevent another thread from changing buffer sizes */
5285 mutex_lock(&buffer->mutex);
5286
b23d7a5f
NP
5287 for_each_online_buffer_cpu(buffer, cpu) {
5288 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 5289
7c339fb4 5290 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
b23d7a5f
NP
5291 atomic_inc(&cpu_buffer->record_disabled);
5292 }
f83c9d0f 5293
b23d7a5f
NP
5294 /* Make sure all commits have finished */
5295 synchronize_rcu();
41ede23e 5296
7c339fb4 5297 for_each_buffer_cpu(buffer, cpu) {
b23d7a5f
NP
5298 cpu_buffer = buffer->buffers[cpu];
5299
7c339fb4
TW
5300 /*
5301 * If a CPU came online during the synchronize_rcu(), then
5302 * ignore it.
5303 */
5304 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5305 continue;
5306
b23d7a5f
NP
5307 reset_disabled_cpu_buffer(cpu_buffer);
5308
5309 atomic_dec(&cpu_buffer->record_disabled);
7c339fb4 5310 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
b23d7a5f 5311 }
bbeb9746
GK
5312
5313 mutex_unlock(&buffer->mutex);
7a8e76a3
SR
5314}
5315
5316/**
5317 * ring_buffer_reset - reset a ring buffer
5318 * @buffer: The ring buffer to reset all cpu buffers
5319 */
13292494 5320void ring_buffer_reset(struct trace_buffer *buffer)
7a8e76a3 5321{
b23d7a5f 5322 struct ring_buffer_per_cpu *cpu_buffer;
7a8e76a3
SR
5323 int cpu;
5324
51d15794
SRV
5325 /* prevent another thread from changing buffer sizes */
5326 mutex_lock(&buffer->mutex);
5327
b23d7a5f
NP
5328 for_each_buffer_cpu(buffer, cpu) {
5329 cpu_buffer = buffer->buffers[cpu];
5330
5331 atomic_inc(&cpu_buffer->resize_disabled);
5332 atomic_inc(&cpu_buffer->record_disabled);
5333 }
5334
5335 /* Make sure all commits have finished */
5336 synchronize_rcu();
5337
5338 for_each_buffer_cpu(buffer, cpu) {
5339 cpu_buffer = buffer->buffers[cpu];
5340
5341 reset_disabled_cpu_buffer(cpu_buffer);
5342
5343 atomic_dec(&cpu_buffer->record_disabled);
5344 atomic_dec(&cpu_buffer->resize_disabled);
5345 }
51d15794
SRV
5346
5347 mutex_unlock(&buffer->mutex);
7a8e76a3 5348}
c4f50183 5349EXPORT_SYMBOL_GPL(ring_buffer_reset);
7a8e76a3
SR
5350
5351/**
b7085b6f 5352 * ring_buffer_empty - is the ring buffer empty?
7a8e76a3
SR
5353 * @buffer: The ring buffer to test
5354 */
13292494 5355bool ring_buffer_empty(struct trace_buffer *buffer)
7a8e76a3
SR
5356{
5357 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 5358 unsigned long flags;
289a5a25 5359 bool dolock;
bc92b956 5360 bool ret;
7a8e76a3
SR
5361 int cpu;
5362
5363 /* yes this is racy, but if you don't like the race, lock the buffer */
5364 for_each_buffer_cpu(buffer, cpu) {
5365 cpu_buffer = buffer->buffers[cpu];
8d707e8e 5366 local_irq_save(flags);
289a5a25 5367 dolock = rb_reader_lock(cpu_buffer);
d4788207 5368 ret = rb_per_cpu_empty(cpu_buffer);
289a5a25 5369 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e
SR
5370 local_irq_restore(flags);
5371
d4788207 5372 if (!ret)
3d4e204d 5373 return false;
7a8e76a3 5374 }
554f786e 5375
3d4e204d 5376 return true;
7a8e76a3 5377}
c4f50183 5378EXPORT_SYMBOL_GPL(ring_buffer_empty);
7a8e76a3
SR
5379
5380/**
5381 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5382 * @buffer: The ring buffer
5383 * @cpu: The CPU buffer to test
5384 */
13292494 5385bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
5386{
5387 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 5388 unsigned long flags;
289a5a25 5389 bool dolock;
bc92b956 5390 bool ret;
7a8e76a3 5391
9e01c1b7 5392 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3d4e204d 5393 return true;
7a8e76a3
SR
5394
5395 cpu_buffer = buffer->buffers[cpu];
8d707e8e 5396 local_irq_save(flags);
289a5a25 5397 dolock = rb_reader_lock(cpu_buffer);
554f786e 5398 ret = rb_per_cpu_empty(cpu_buffer);
289a5a25 5399 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 5400 local_irq_restore(flags);
554f786e
SR
5401
5402 return ret;
7a8e76a3 5403}
c4f50183 5404EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
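/*
 * For example, a reader can use this to skip CPUs with nothing to read
 * (an illustrative sketch, not part of this file):
 *
 *	for_each_online_cpu(cpu) {
 *		if (ring_buffer_empty_cpu(buffer, cpu))
 *			continue;
 *		... consume events or read pages from this CPU ...
 *	}
 *
 * As the comments above note, this is racy by design: a writer may add
 * events right after the check, so the result is only a hint.
 */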
7a8e76a3 5405
85bac32c 5406#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
7a8e76a3
SR
5407/**
5408 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5409 * @buffer_a: One buffer to swap with
5410 * @buffer_b: The other buffer to swap with
59e7cffe 5411 * @cpu: the CPU of the buffers to swap
7a8e76a3
SR
5412 *
5413 * This function is useful for tracers that want to take a "snapshot"
5414 * of a CPU buffer and have another backup buffer lying around.
5415 * It is expected that the tracer handles the cpu buffer not being
5416 * used at the moment.
5417 */
13292494
SRV
5418int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5419 struct trace_buffer *buffer_b, int cpu)
7a8e76a3
SR
5420{
5421 struct ring_buffer_per_cpu *cpu_buffer_a;
5422 struct ring_buffer_per_cpu *cpu_buffer_b;
554f786e
SR
5423 int ret = -EINVAL;
5424
9e01c1b7
RR
5425 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5426 !cpumask_test_cpu(cpu, buffer_b->cpumask))
554f786e 5427 goto out;
7a8e76a3 5428
438ced17
VN
5429 cpu_buffer_a = buffer_a->buffers[cpu];
5430 cpu_buffer_b = buffer_b->buffers[cpu];
5431
7a8e76a3 5432 /* At least make sure the two buffers are somewhat the same */
438ced17 5433 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
554f786e
SR
5434 goto out;
5435
b81e03a2
SRG
5436 if (buffer_a->subbuf_order != buffer_b->subbuf_order)
5437 goto out;
5438
554f786e 5439 ret = -EAGAIN;
7a8e76a3 5440
97b17efe 5441 if (atomic_read(&buffer_a->record_disabled))
554f786e 5442 goto out;
97b17efe
SR
5443
5444 if (atomic_read(&buffer_b->record_disabled))
554f786e 5445 goto out;
97b17efe 5446
97b17efe 5447 if (atomic_read(&cpu_buffer_a->record_disabled))
554f786e 5448 goto out;
97b17efe
SR
5449
5450 if (atomic_read(&cpu_buffer_b->record_disabled))
554f786e 5451 goto out;
97b17efe 5452
7a8e76a3 5453 /*
74401729 5454 * We can't do a synchronize_rcu here because this
7a8e76a3
SR
5455 * function can be called in atomic context.
5456 * Normally this will be called from the same CPU as cpu.
5457 * If not it's up to the caller to protect this.
5458 */
5459 atomic_inc(&cpu_buffer_a->record_disabled);
5460 atomic_inc(&cpu_buffer_b->record_disabled);
5461
98277991
SR
5462 ret = -EBUSY;
5463 if (local_read(&cpu_buffer_a->committing))
5464 goto out_dec;
5465 if (local_read(&cpu_buffer_b->committing))
5466 goto out_dec;
5467
8a96c028
CL
5468 /*
5469 * When resize is in progress, we cannot swap it because
5470 * it will mess the state of the cpu buffer.
5471 */
5472 if (atomic_read(&buffer_a->resizing))
5473 goto out_dec;
5474 if (atomic_read(&buffer_b->resizing))
5475 goto out_dec;
5476
7a8e76a3
SR
5477 buffer_a->buffers[cpu] = cpu_buffer_b;
5478 buffer_b->buffers[cpu] = cpu_buffer_a;
5479
5480 cpu_buffer_b->buffer = buffer_a;
5481 cpu_buffer_a->buffer = buffer_b;
5482
98277991
SR
5483 ret = 0;
5484
5485out_dec:
7a8e76a3
SR
5486 atomic_dec(&cpu_buffer_a->record_disabled);
5487 atomic_dec(&cpu_buffer_b->record_disabled);
554f786e 5488out:
554f786e 5489 return ret;
7a8e76a3 5490}
c4f50183 5491EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
85bac32c 5492#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
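/*
 * For example, a snapshot can be taken by swapping the live CPU buffer
 * with a spare buffer of the same geometry (an illustrative sketch,
 * not part of this file; "main_buffer" and "snap_buffer" are
 * hypothetical):
 *
 *	int err = ring_buffer_swap_cpu(main_buffer, snap_buffer, cpu);
 *
 *	if (!err)
 *		... read the old live pages from snap_buffer ...
 *
 * A return of -EINVAL means the buffers do not match (cpumask, page
 * count or sub-buffer order), -EAGAIN that recording is disabled, and
 * -EBUSY that a commit or resize was in flight.
 */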
7a8e76a3 5493
8789a9e7
SR
5494/**
5495 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5496 * @buffer: the buffer to allocate for.
d611851b 5497 * @cpu: the cpu buffer to allocate.
8789a9e7
SR
5498 *
5499 * This function is used in conjunction with ring_buffer_read_page.
5500 * When reading a full page from the ring buffer, these functions
5501 * can be used to speed up the process. The calling function should
5502 * allocate a few pages first with this function. Then when it
5503 * needs to get pages from the ring buffer, it passes the result
5504 * of this function into ring_buffer_read_page, which will swap
5505 * the page that was allocated, with the read page of the buffer.
5506 *
5507 * Returns:
a7e52ad7 5508 * The page allocated, or ERR_PTR
8789a9e7 5509 */
bce761d7
TSV
5510struct buffer_data_read_page *
5511ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
8789a9e7 5512{
a7e52ad7 5513 struct ring_buffer_per_cpu *cpu_buffer;
bce761d7 5514 struct buffer_data_read_page *bpage = NULL;
73a757e6 5515 unsigned long flags;
7ea59064 5516 struct page *page;
8789a9e7 5517
a7e52ad7
SRV
5518 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5519 return ERR_PTR(-ENODEV);
5520
bce761d7
TSV
5521 bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
5522 if (!bpage)
5523 return ERR_PTR(-ENOMEM);
5524
5525 bpage->order = buffer->subbuf_order;
a7e52ad7 5526 cpu_buffer = buffer->buffers[cpu];
73a757e6
SRV
5527 local_irq_save(flags);
5528 arch_spin_lock(&cpu_buffer->lock);
5529
5530 if (cpu_buffer->free_page) {
bce761d7 5531 bpage->data = cpu_buffer->free_page;
73a757e6
SRV
5532 cpu_buffer->free_page = NULL;
5533 }
5534
5535 arch_spin_unlock(&cpu_buffer->lock);
5536 local_irq_restore(flags);
5537
bce761d7 5538 if (bpage->data)
73a757e6
SRV
5539 goto out;
5540
f9b94daa
TSV
5541 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
5542 cpu_buffer->buffer->subbuf_order);
bce761d7
TSV
5543 if (!page) {
5544 kfree(bpage);
a7e52ad7 5545 return ERR_PTR(-ENOMEM);
bce761d7 5546 }
8789a9e7 5547
bce761d7 5548 bpage->data = page_address(page);
8789a9e7 5549
73a757e6 5550 out:
bce761d7 5551 rb_init_page(bpage->data);
ef7a4a16 5552
044fa782 5553 return bpage;
8789a9e7 5554}
d6ce96da 5555EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
8789a9e7
SR
5556
5557/**
5558 * ring_buffer_free_read_page - free an allocated read page
5559 * @buffer: the buffer the page was allocated for
73a757e6 5560 * @cpu: the cpu buffer the page came from
bce761d7 5561 * @data_page: the page to free
8789a9e7
SR
5562 *
5563 * Free a page allocated from ring_buffer_alloc_read_page.
5564 */
bce761d7
TSV
5565void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
5566 struct buffer_data_read_page *data_page)
8789a9e7 5567{
3e4272b9 5568 struct ring_buffer_per_cpu *cpu_buffer;
bce761d7 5569 struct buffer_data_page *bpage = data_page->data;
ae415fa4 5570 struct page *page = virt_to_page(bpage);
73a757e6
SRV
5571 unsigned long flags;
5572
3e4272b9
JJB
5573 if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5574 return;
5575
5576 cpu_buffer = buffer->buffers[cpu];
5577
bce761d7
TSV
5578 /*
5579 * If the page is still in use someplace else, or order of the page
5580 * is different from the subbuffer order of the buffer -
5581 * we can't reuse it
5582 */
5583 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
ae415fa4
SRV
5584 goto out;
5585
73a757e6
SRV
5586 local_irq_save(flags);
5587 arch_spin_lock(&cpu_buffer->lock);
5588
5589 if (!cpu_buffer->free_page) {
5590 cpu_buffer->free_page = bpage;
5591 bpage = NULL;
5592 }
5593
5594 arch_spin_unlock(&cpu_buffer->lock);
5595 local_irq_restore(flags);
5596
ae415fa4 5597 out:
bce761d7
TSV
5598 free_pages((unsigned long)bpage, data_page->order);
5599 kfree(data_page);
8789a9e7 5600}
d6ce96da 5601EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
8789a9e7
SR
5602
5603/**
5604 * ring_buffer_read_page - extract a page from the ring buffer
5605 * @buffer: buffer to extract from
5606 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
ef7a4a16 5607 * @len: amount to extract
8789a9e7
SR
5608 * @cpu: the cpu of the buffer to extract
5609 * @full: should the extraction only happen when the page is full.
5610 *
5611 * This function will pull out a page from the ring buffer and consume it.
5612 * @data_page must be the address of the variable that was returned
5613 * from ring_buffer_alloc_read_page. This is because the page might be used
5614 * to swap with a page in the ring buffer.
5615 *
5616 * for example:
d611851b 5617 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
a7e52ad7
SRV
5618 * if (IS_ERR(rpage))
5619 * return PTR_ERR(rpage);
bce761d7 5620 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
667d2412 5621 * if (ret >= 0)
bce761d7
TSV
5622 * process_page(ring_buffer_read_page_data(rpage), ret);
5623 * ring_buffer_free_read_page(buffer, cpu, rpage);
8789a9e7
SR
5624 *
5625 * When @full is set, the function will not succeed unless
5626 * the writer is off the reader page.
5627 *
5628 * Note: it is up to the calling functions to handle sleeps and wakeups.
5629 * The ring buffer can be used anywhere in the kernel and can not
5630 * blindly call wake_up. The layer that uses the ring buffer must be
5631 * responsible for that.
5632 *
5633 * Returns:
667d2412
LJ
5634 * >=0 if data has been transferred, returns the offset of consumed data.
5635 * <0 if no data has been transferred.
8789a9e7 5636 */
13292494 5637int ring_buffer_read_page(struct trace_buffer *buffer,
bce761d7
TSV
5638 struct buffer_data_read_page *data_page,
5639 size_t len, int cpu, int full)
8789a9e7
SR
5640{
5641 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5642 struct ring_buffer_event *event;
044fa782 5643 struct buffer_data_page *bpage;
ef7a4a16 5644 struct buffer_page *reader;
ff0ff84a 5645 unsigned long missed_events;
8789a9e7 5646 unsigned long flags;
ef7a4a16 5647 unsigned int commit;
667d2412 5648 unsigned int read;
4f3640f8 5649 u64 save_timestamp;
667d2412 5650 int ret = -1;
8789a9e7 5651
554f786e
SR
5652 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5653 goto out;
5654
474d32b6
SR
5655 /*
5656 * If len is not big enough to hold the page header, then
5657 * we can not copy anything.
5658 */
5659 if (len <= BUF_PAGE_HDR_SIZE)
554f786e 5660 goto out;
474d32b6
SR
5661
5662 len -= BUF_PAGE_HDR_SIZE;
5663
bce761d7
TSV
5664 if (!data_page || !data_page->data)
5665 goto out;
5666 if (data_page->order != buffer->subbuf_order)
554f786e 5667 goto out;
8789a9e7 5668
bce761d7 5669 bpage = data_page->data;
044fa782 5670 if (!bpage)
554f786e 5671 goto out;
8789a9e7 5672
5389f6fa 5673 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
8789a9e7 5674
ef7a4a16
SR
5675 reader = rb_get_reader_page(cpu_buffer);
5676 if (!reader)
554f786e 5677 goto out_unlock;
8789a9e7 5678
ef7a4a16
SR
5679 event = rb_reader_event(cpu_buffer);
5680
5681 read = reader->read;
5682 commit = rb_page_commit(reader);
667d2412 5683
66a8cb95 5684 /* Check if any events were dropped */
ff0ff84a 5685 missed_events = cpu_buffer->lost_events;
66a8cb95 5686
8789a9e7 5687 /*
474d32b6
SR
5688 * If this page has been partially read or
5689 * if len is not big enough to read the rest of the page or
5690 * a writer is still on the page, then
5691 * we must copy the data from the page to the buffer.
5692 * Otherwise, we can simply swap the page with the one passed in.
8789a9e7 5693 */
474d32b6 5694 if (read || (len < (commit - read)) ||
ef7a4a16 5695 cpu_buffer->reader_page == cpu_buffer->commit_page) {
667d2412 5696 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
474d32b6
SR
5697 unsigned int rpos = read;
5698 unsigned int pos = 0;
ef7a4a16 5699 unsigned int size;
8789a9e7 5700
fa8f4a89
SRG
5701 /*
5702 * If a full page is expected, this can still be returned
5703 * if there's been a previous partial read and the
5704 * rest of the page can be read and the commit page is off
5705 * the reader page.
5706 */
5707 if (full &&
5708 (!read || (len < (commit - read)) ||
5709 cpu_buffer->reader_page == cpu_buffer->commit_page))
554f786e 5710 goto out_unlock;
8789a9e7 5711
ef7a4a16
SR
5712 if (len > (commit - read))
5713 len = (commit - read);
5714
69d1b839
SR
5715 /* Always keep the time extend and data together */
5716 size = rb_event_ts_length(event);
ef7a4a16
SR
5717
5718 if (len < size)
554f786e 5719 goto out_unlock;
ef7a4a16 5720
4f3640f8
SR
5721 /* save the current timestamp, since the user will need it */
5722 save_timestamp = cpu_buffer->read_stamp;
5723
ef7a4a16
SR
5724 /* Need to copy one event at a time */
5725 do {
e1e35927
DS
5726 /* We need the size of one event, because
5727 * rb_advance_reader only advances by one event,
5728 * whereas rb_event_ts_length may include the size of
5729 * one or two events.
5730 * We have already ensured there's enough space if this
5731 * is a time extend. */
5732 size = rb_event_length(event);
474d32b6 5733 memcpy(bpage->data + pos, rpage->data + rpos, size);
ef7a4a16
SR
5734
5735 len -= size;
5736
5737 rb_advance_reader(cpu_buffer);
474d32b6
SR
5738 rpos = reader->read;
5739 pos += size;
ef7a4a16 5740
18fab912
HY
5741 if (rpos >= commit)
5742 break;
5743
ef7a4a16 5744 event = rb_reader_event(cpu_buffer);
69d1b839
SR
5745 /* Always keep the time extend and data together */
5746 size = rb_event_ts_length(event);
e1e35927 5747 } while (len >= size);
667d2412
LJ
5748
5749 /* update bpage */
ef7a4a16 5750 local_set(&bpage->commit, pos);
4f3640f8 5751 bpage->time_stamp = save_timestamp;
ef7a4a16 5752
474d32b6
SR
5753 /* we copied everything to the beginning */
5754 read = 0;
8789a9e7 5755 } else {
afbab76a 5756 /* update the entry counter */
77ae365e 5757 cpu_buffer->read += rb_page_entries(reader);
45d99ea4 5758 cpu_buffer->read_bytes += rb_page_commit(reader);
afbab76a 5759
8789a9e7 5760 /* swap the pages */
044fa782 5761 rb_init_page(bpage);
ef7a4a16 5762 bpage = reader->page;
bce761d7 5763 reader->page = data_page->data;
ef7a4a16 5764 local_set(&reader->write, 0);
778c55d4 5765 local_set(&reader->entries, 0);
ef7a4a16 5766 reader->read = 0;
bce761d7 5767 data_page->data = bpage;
ff0ff84a
SR
5768
5769 /*
5770 * Use the real_end for the data size,
5771 * This gives us a chance to store the lost events
5772 * on the page.
5773 */
5774 if (reader->real_end)
5775 local_set(&bpage->commit, reader->real_end);
8789a9e7 5776 }
667d2412 5777 ret = read;
8789a9e7 5778
66a8cb95 5779 cpu_buffer->lost_events = 0;
2711ca23
SR
5780
5781 commit = local_read(&bpage->commit);
66a8cb95
SR
5782 /*
5783 * Set a flag in the commit field if we lost events
5784 */
ff0ff84a 5785 if (missed_events) {
ff0ff84a
SR
5786 /* If there is room at the end of the page to save the
5787 * missed events, then record it there.
5788 */
139f8400 5789 if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
ff0ff84a
SR
5790 memcpy(&bpage->data[commit], &missed_events,
5791 sizeof(missed_events));
5792 local_add(RB_MISSED_STORED, &bpage->commit);
2711ca23 5793 commit += sizeof(missed_events);
ff0ff84a 5794 }
66a8cb95 5795 local_add(RB_MISSED_EVENTS, &bpage->commit);
ff0ff84a 5796 }
66a8cb95 5797
2711ca23
SR
5798 /*
5799 * This page may be off to user land. Zero it out here.
5800 */
139f8400
TSV
5801 if (commit < buffer->subbuf_size)
5802 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
2711ca23 5803
554f786e 5804 out_unlock:
5389f6fa 5805 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
8789a9e7 5806
554f786e 5807 out:
8789a9e7
SR
5808 return ret;
5809}
d6ce96da 5810EXPORT_SYMBOL_GPL(ring_buffer_read_page);
8789a9e7 5811
bce761d7
TSV
5812/**
5813 * ring_buffer_read_page_data - get pointer to the data in the page.
5814 * @page: the page to get the data from
5815 *
5816 * Returns pointer to the actual data in this page.
5817 */
5818void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
5819{
5820 return page->data;
5821}
5822EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
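/*
 * For example, pulling whole sub-buffers out of the ring buffer with
 * the functions above (an illustrative sketch, not part of this file;
 * "process_subbuf" is a hypothetical helper):
 *
 *	struct buffer_data_read_page *dpage;
 *	size_t len = ring_buffer_subbuf_size_get(buffer);
 *	int ret;
 *
 *	dpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(dpage))
 *		return PTR_ERR(dpage);
 *
 *	ret = ring_buffer_read_page(buffer, dpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_subbuf(ring_buffer_read_page_data(dpage), ret);
 *
 *	ring_buffer_free_read_page(buffer, cpu, dpage);
 */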
5823
2808e31e
TSV
5824/**
5825 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5826 * @buffer: the buffer to get the sub buffer size from
5827 *
5828 * Returns size of the sub buffer, in bytes.
5829 */
5830int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
5831{
5832 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
5833}
5834EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
5835
5836/**
5837 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5838 * @buffer: The ring_buffer to get the system sub page order from
5839 *
5840 * By default, one ring buffer sub page equals one system page. This parameter
5841 * is configurable per ring buffer. The size of the ring buffer sub page can be
5842 * extended, but must be an order of the system page size.
5843 *
5844 * Returns the order of buffer sub page size, in system pages:
5845 * 0 means the sub buffer size is 1 system page and so forth.
5846 * In case of an error < 0 is returned.
5847 */
5848int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
5849{
5850 if (!buffer)
5851 return -EINVAL;
5852
5853 return buffer->subbuf_order;
5854}
5855EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
5856
5857/**
5858 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5859 * @buffer: The ring_buffer to set the new page size.
5860 * @order: Order of the system pages in one sub buffer page
5861 *
5862 * By default, one ring buffer page equals one system page. This API can be
5863 * used to set a new size of the ring buffer page. The size must be an order
5864 * of the system page size, that's why the input parameter @order is the order of
5865 * system pages that are allocated for one ring buffer page:
5866 * 0 - 1 system page
5867 * 1 - 2 system pages
5868 * 2 - 4 system pages
5869 * ...
5870 *
5871 * Returns 0 on success or < 0 in case of an error.
5872 */
5873int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
5874{
8e7b58c2
SRG
5875 struct ring_buffer_per_cpu *cpu_buffer;
5876 struct buffer_page *bpage, *tmp;
f9b94daa
TSV
5877 int old_order, old_size;
5878 int nr_pages;
2808e31e 5879 int psize;
f9b94daa
TSV
5880 int err;
5881 int cpu;
2808e31e
TSV
5882
5883 if (!buffer || order < 0)
5884 return -EINVAL;
5885
5886 if (buffer->subbuf_order == order)
5887 return 0;
5888
5889 psize = (1 << order) * PAGE_SIZE;
5890 if (psize <= BUF_PAGE_HDR_SIZE)
5891 return -EINVAL;
5892
e78fb4ea
SRG
5893 /* Size of a subbuf cannot be greater than the write counter */
5894 if (psize > RB_WRITE_MASK + 1)
5895 return -EINVAL;
5896
f9b94daa
TSV
5897 old_order = buffer->subbuf_order;
5898 old_size = buffer->subbuf_size;
5899
5900 /* prevent another thread from changing buffer sizes */
5901 mutex_lock(&buffer->mutex);
5902 atomic_inc(&buffer->record_disabled);
5903
5904 /* Make sure all commits have finished */
5905 synchronize_rcu();
5906
2808e31e
TSV
5907 buffer->subbuf_order = order;
5908 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
5909
f9b94daa
TSV
5910 /* Make sure all new buffers are allocated, before deleting the old ones */
5911 for_each_buffer_cpu(buffer, cpu) {
8e7b58c2 5912
f9b94daa
TSV
5913 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5914 continue;
5915
8e7b58c2
SRG
5916 cpu_buffer = buffer->buffers[cpu];
5917
353cc219
SRG
5918 /* Update the number of pages to match the new size */
5919 nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
5920 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
5921
8e7b58c2
SRG
5922 /* we need a minimum of two pages */
5923 if (nr_pages < 2)
5924 nr_pages = 2;
5925
5926 cpu_buffer->nr_pages_to_update = nr_pages;
5927
5928 /* Include the reader page */
5929 nr_pages++;
5930
5931 /* Allocate the new size buffer */
5932 INIT_LIST_HEAD(&cpu_buffer->new_pages);
5933 if (__rb_allocate_pages(cpu_buffer, nr_pages,
5934 &cpu_buffer->new_pages)) {
5935 /* not enough memory for new pages */
f9b94daa
TSV
5936 err = -ENOMEM;
5937 goto error;
5938 }
5939 }
5940
5941 for_each_buffer_cpu(buffer, cpu) {
8e7b58c2 5942
f9b94daa
TSV
5943 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5944 continue;
5945
8e7b58c2
SRG
5946 cpu_buffer = buffer->buffers[cpu];
5947
5948 /* Clear the head bit to make the link list normal to read */
5949 rb_head_page_deactivate(cpu_buffer);
5950
5951 /* Now walk the list and free all the old sub buffers */
5952 list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
5953 list_del_init(&bpage->list);
5954 free_buffer_page(bpage);
5955 }
5956 /* The above loop stopped at the last page needing to be freed */
5957 bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
5958 free_buffer_page(bpage);
5959
5960 /* Free the current reader page */
5961 free_buffer_page(cpu_buffer->reader_page);
5962
5963 /* One page was allocated for the reader page */
5964 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
5965 struct buffer_page, list);
5966 list_del_init(&cpu_buffer->reader_page->list);
5967
5968 /* The cpu_buffer pages are a linked list with no head */
5969 cpu_buffer->pages = cpu_buffer->new_pages.next;
5970 cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
5971 cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
5972
5973 /* Clear the new_pages list */
5974 INIT_LIST_HEAD(&cpu_buffer->new_pages);
5975
5976 cpu_buffer->head_page
5977 = list_entry(cpu_buffer->pages, struct buffer_page, list);
5978 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
5979
5980 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
5981 cpu_buffer->nr_pages_to_update = 0;
5982
5983 free_pages((unsigned long)cpu_buffer->free_page, old_order);
5984 cpu_buffer->free_page = NULL;
5985
5986 rb_head_page_activate(cpu_buffer);
5987
5988 rb_check_pages(cpu_buffer);
f9b94daa
TSV
5989 }
5990
5991 atomic_dec(&buffer->record_disabled);
5992 mutex_unlock(&buffer->mutex);
5993
2808e31e 5994 return 0;
f9b94daa
TSV
5995
5996error:
5997 buffer->subbuf_order = old_order;
5998 buffer->subbuf_size = old_size;
5999
6000 atomic_dec(&buffer->record_disabled);
6001 mutex_unlock(&buffer->mutex);
6002
6003 for_each_buffer_cpu(buffer, cpu) {
8e7b58c2
SRG
6004 cpu_buffer = buffer->buffers[cpu];
6005
6006 if (!cpu_buffer->nr_pages_to_update)
f9b94daa 6007 continue;
8e7b58c2
SRG
6008
6009 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6010 list_del_init(&bpage->list);
6011 free_buffer_page(bpage);
6012 }
f9b94daa 6013 }
f9b94daa
TSV
6014
6015 return err;
2808e31e
TSV
6016}
6017EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
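/*
 * For example, to switch to 8K sub-buffers on a 4K-page system
 * (an illustrative sketch, not part of this file):
 *
 *	int err = ring_buffer_subbuf_order_set(buffer, get_order(8192));
 *
 * get_order(8192) is 1 on a 4K-page system, so each sub-buffer then
 * spans two system pages and holds 8192 - BUF_PAGE_HDR_SIZE bytes of
 * event data.
 */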
6018
b32614c0
SAS
6019/*
6020 * We only allocate new buffers, never free them if the CPU goes down.
6021 * If we were to free the buffer, then the user would lose any trace that was in
6022 * the buffer.
6023 */
6024int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
554f786e 6025{
13292494 6026 struct trace_buffer *buffer;
9b94a8fb
SRRH
6027 long nr_pages_same;
6028 int cpu_i;
6029 unsigned long nr_pages;
554f786e 6030
13292494 6031 buffer = container_of(node, struct trace_buffer, node);
b32614c0
SAS
6032 if (cpumask_test_cpu(cpu, buffer->cpumask))
6033 return 0;
6034
6035 nr_pages = 0;
6036 nr_pages_same = 1;
6037 /* check if all cpu sizes are same */
6038 for_each_buffer_cpu(buffer, cpu_i) {
6039 /* fill in the size from first enabled cpu */
6040 if (nr_pages == 0)
6041 nr_pages = buffer->buffers[cpu_i]->nr_pages;
6042 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6043 nr_pages_same = 0;
6044 break;
554f786e 6045 }
554f786e 6046 }
b32614c0
SAS
6047 /* allocate minimum pages, user can later expand it */
6048 if (!nr_pages_same)
6049 nr_pages = 2;
6050 buffer->buffers[cpu] =
6051 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6052 if (!buffer->buffers[cpu]) {
6053 WARN(1, "failed to allocate ring buffer on CPU %u\n",
6054 cpu);
6055 return -ENOMEM;
6056 }
6057 smp_wmb();
6058 cpumask_set_cpu(cpu, buffer->cpumask);
6059 return 0;
554f786e 6060}
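/*
 * This is meant to be registered as a multi-instance CPU hotplug
 * callback; the tracing core does roughly the following, and each ring
 * buffer then attaches its @node to that state (an illustrative sketch
 * of the registration, not part of this file):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 */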
6c43e554
SRRH
6061
6062#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6063/*
6064 * This is a basic integrity check of the ring buffer.
6065 * Late in the boot cycle this test will run when configured in.
6066 * It will kick off a thread per CPU that will go into a loop
6067 * writing to the per cpu ring buffer various sizes of data.
6068 * Some of the data will be large items, some small.
6069 *
6070 * Another thread is created that goes into a spin, sending out
6071 * IPIs to the other CPUs to also write into the ring buffer.
6072 * This is to test the nesting ability of the buffer.
6073 *
6074 * Basic stats are recorded and reported. If something in the
6075 * ring buffer should happen that's not expected, a big warning
6076 * is displayed and all ring buffers are disabled.
6077 */
6078static struct task_struct *rb_threads[NR_CPUS] __initdata;
6079
6080struct rb_test_data {
13292494 6081 struct trace_buffer *buffer;
6c43e554
SRRH
6082 unsigned long events;
6083 unsigned long bytes_written;
6084 unsigned long bytes_alloc;
6085 unsigned long bytes_dropped;
6086 unsigned long events_nested;
6087 unsigned long bytes_written_nested;
6088 unsigned long bytes_alloc_nested;
6089 unsigned long bytes_dropped_nested;
6090 int min_size_nested;
6091 int max_size_nested;
6092 int max_size;
6093 int min_size;
6094 int cpu;
6095 int cnt;
6096};
6097
6098static struct rb_test_data rb_data[NR_CPUS] __initdata;
6099
6100/* 1 meg per cpu */
6101#define RB_TEST_BUFFER_SIZE 1048576
6102
6103static char rb_string[] __initdata =
6104 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6105 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6106 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6107
6108static bool rb_test_started __initdata;
6109
6110struct rb_item {
6111 int size;
6112 char str[];
6113};
6114
6115static __init int rb_write_something(struct rb_test_data *data, bool nested)
6116{
6117 struct ring_buffer_event *event;
6118 struct rb_item *item;
6119 bool started;
6120 int event_len;
6121 int size;
6122 int len;
6123 int cnt;
6124
6125 /* Have nested writes different than what is written */
6126 cnt = data->cnt + (nested ? 27 : 0);
6127
6128 /* Multiply cnt by ~e, to make some unique increment */
40ed29b3 6129 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6c43e554
SRRH
6130
6131 len = size + sizeof(struct rb_item);
6132
6133 started = rb_test_started;
6134 /* read rb_test_started before checking buffer enabled */
6135 smp_rmb();
6136
6137 event = ring_buffer_lock_reserve(data->buffer, len);
6138 if (!event) {
6139 /* Ignore dropped events before test starts. */
6140 if (started) {
6141 if (nested)
6142 data->bytes_dropped_nested += len;
6143 else
6144 data->bytes_dropped += len;
6145 }
6146 return len;
6147 }
6148
6149 event_len = ring_buffer_event_length(event);
6150
6151 if (RB_WARN_ON(data->buffer, event_len < len))
6152 goto out;
6153
6154 item = ring_buffer_event_data(event);
6155 item->size = size;
6156 memcpy(item->str, rb_string, size);
6157
6158 if (nested) {
6159 data->bytes_alloc_nested += event_len;
6160 data->bytes_written_nested += len;
6161 data->events_nested++;
6162 if (!data->min_size_nested || len < data->min_size_nested)
6163 data->min_size_nested = len;
6164 if (len > data->max_size_nested)
6165 data->max_size_nested = len;
6166 } else {
6167 data->bytes_alloc += event_len;
6168 data->bytes_written += len;
6169 data->events++;
6170 if (!data->min_size || len < data->min_size)
6171 data->min_size = len;
6172 if (len > data->max_size)
6173 data->max_size = len;
6174 }
6175
6176 out:
04aabc32 6177 ring_buffer_unlock_commit(data->buffer);
6c43e554
SRRH
6178
6179 return 0;
6180}
6181
6182static __init int rb_test(void *arg)
6183{
6184 struct rb_test_data *data = arg;
6185
6186 while (!kthread_should_stop()) {
6187 rb_write_something(data, false);
6188 data->cnt++;
6189
6190 set_current_state(TASK_INTERRUPTIBLE);
6191 /* Now sleep between a min of 100-300us and a max of 1ms */
6192 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6193 }
6194
6195 return 0;
6196}
6197
6198static __init void rb_ipi(void *ignore)
6199{
6200 struct rb_test_data *data;
6201 int cpu = smp_processor_id();
6202
6203 data = &rb_data[cpu];
6204 rb_write_something(data, true);
6205}
6206
6207static __init int rb_hammer_test(void *arg)
6208{
6209 while (!kthread_should_stop()) {
6210
6211 /* Send an IPI to all cpus to write data! */
6212 smp_call_function(rb_ipi, NULL, 1);
6213 /* No sleep, but for non preempt, let others run */
6214 schedule();
6215 }
6216
6217 return 0;
6218}
6219
6220static __init int test_ringbuffer(void)
6221{
6222 struct task_struct *rb_hammer;
13292494 6223 struct trace_buffer *buffer;
6c43e554
SRRH
6224 int cpu;
6225 int ret = 0;
6226
a356646a 6227 if (security_locked_down(LOCKDOWN_TRACEFS)) {
ee195452 6228 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
a356646a
SRV
6229 return 0;
6230 }
6231
6c43e554
SRRH
6232 pr_info("Running ring buffer tests...\n");
6233
6234 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6235 if (WARN_ON(!buffer))
6236 return 0;
6237
6238 /* Disable buffer so that threads can't write to it yet */
6239 ring_buffer_record_off(buffer);
6240
6241 for_each_online_cpu(cpu) {
6242 rb_data[cpu].buffer = buffer;
6243 rb_data[cpu].cpu = cpu;
6244 rb_data[cpu].cnt = cpu;
64ed3a04
CH
6245 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6246 cpu, "rbtester/%u");
62277de7 6247 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6c43e554 6248 pr_cont("FAILED\n");
62277de7 6249 ret = PTR_ERR(rb_threads[cpu]);
6c43e554
SRRH
6250 goto out_free;
6251 }
6c43e554
SRRH
6252 }
6253
6254 /* Now create the rb hammer! */
6255 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
62277de7 6256 if (WARN_ON(IS_ERR(rb_hammer))) {
6c43e554 6257 pr_cont("FAILED\n");
62277de7 6258 ret = PTR_ERR(rb_hammer);
6c43e554
SRRH
6259 goto out_free;
6260 }
6261
6262 ring_buffer_record_on(buffer);
6263 /*
6264 * Show buffer is enabled before setting rb_test_started.
6265 * Yes there's a small race window where events could be
6266 * dropped and the thread won't catch it. But when a ring
6267 * buffer gets enabled, there will always be some kind of
6268 * delay before other CPUs see it. Thus, we don't care about
6269 * those dropped events. We care about events dropped after
6270 * the threads see that the buffer is active.
6271 */
6272 smp_wmb();
6273 rb_test_started = true;
6274
6275 set_current_state(TASK_INTERRUPTIBLE);
6276 /* Just run for 10 seconds */
6277 schedule_timeout(10 * HZ);
6278
6279 kthread_stop(rb_hammer);
6280
6281 out_free:
6282 for_each_online_cpu(cpu) {
6283 if (!rb_threads[cpu])
6284 break;
6285 kthread_stop(rb_threads[cpu]);
6286 }
6287 if (ret) {
6288 ring_buffer_free(buffer);
6289 return ret;
6290 }
6291
6292 /* Report! */
6293 pr_info("finished\n");
6294 for_each_online_cpu(cpu) {
6295 struct ring_buffer_event *event;
6296 struct rb_test_data *data = &rb_data[cpu];
6297 struct rb_item *item;
6298 unsigned long total_events;
6299 unsigned long total_dropped;
6300 unsigned long total_written;
6301 unsigned long total_alloc;
6302 unsigned long total_read = 0;
6303 unsigned long total_size = 0;
6304 unsigned long total_len = 0;
6305 unsigned long total_lost = 0;
6306 unsigned long lost;
6307 int big_event_size;
6308 int small_event_size;
6309
6310 ret = -1;
6311
6312 total_events = data->events + data->events_nested;
6313 total_written = data->bytes_written + data->bytes_written_nested;
6314 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6315 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6316
6317 big_event_size = data->max_size + data->max_size_nested;
6318 small_event_size = data->min_size + data->min_size_nested;
6319
6320 pr_info("CPU %d:\n", cpu);
6321 pr_info(" events: %ld\n", total_events);
6322 pr_info(" dropped bytes: %ld\n", total_dropped);
6323 pr_info(" alloced bytes: %ld\n", total_alloc);
6324 pr_info(" written bytes: %ld\n", total_written);
6325 pr_info(" biggest event: %d\n", big_event_size);
6326 pr_info(" smallest event: %d\n", small_event_size);
6327
6328 if (RB_WARN_ON(buffer, total_dropped))
6329 break;
6330
6331 ret = 0;
6332
6333 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6334 total_lost += lost;
6335 item = ring_buffer_event_data(event);
6336 total_len += ring_buffer_event_length(event);
6337 total_size += item->size + sizeof(struct rb_item);
6338 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6339 pr_info("FAILED!\n");
6340 pr_info("buffer had: %.*s\n", item->size, item->str);
6341 pr_info("expected: %.*s\n", item->size, rb_string);
6342 RB_WARN_ON(buffer, 1);
6343 ret = -1;
6344 break;
6345 }
6346 total_read++;
6347 }
6348 if (ret)
6349 break;
6350
6351 ret = -1;
6352
6353 pr_info(" read events: %ld\n", total_read);
6354 pr_info(" lost events: %ld\n", total_lost);
6355 pr_info(" total events: %ld\n", total_lost + total_read);
6356 pr_info(" recorded len bytes: %ld\n", total_len);
6357 pr_info(" recorded size bytes: %ld\n", total_size);
ed888241 6358 if (total_lost) {
6c43e554
SRRH
6359 pr_info(" With dropped events, record len and size may not match\n"
6360 " alloced and written from above\n");
ed888241 6361 } else {
6c43e554
SRRH
6362 if (RB_WARN_ON(buffer, total_len != total_alloc ||
6363 total_size != total_written))
6364 break;
6365 }
6366 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6367 break;
6368
6369 ret = 0;
6370 }
6371 if (!ret)
6372 pr_info("Ring buffer PASSED!\n");
6373
6374 ring_buffer_free(buffer);
6375 return 0;
6376}
6377
6378late_initcall(test_ringbuffer);
6379#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */