/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

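/*
 * Local view of the page header filled in by ring_buffer_read_page():
 * a timestamp, a commit counter and the rest of the page as raw event data.
 */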
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int kill_test;

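/*
 * KILL_TEST() flags the run as failed exactly once and emits a single
 * WARN_ON() backtrace; both threads poll kill_test so the benchmark can
 * wind down instead of continuing on bad data.
 */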
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

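/*
 * Consume a single event from @cpu with ring_buffer_consume() and check
 * that its payload is the id of the CPU that wrote it.
 */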
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

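/*
 * Read a whole sub-buffer page from @cpu via ring_buffer_read_page() and
 * walk the raw events on it, validating each payload as in read_event().
 */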
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
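			/*
			 * Decode the event header by hand: padding and
			 * time-extend events only advance the cursor, a
			 * type_len of 0 stores the length in array[0], and
			 * any other type_len encodes the length directly as
			 * (type_len + 1) * 4 bytes.
			 */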
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

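/*
 * Consumer main loop: drain every online CPU, alternating between the
 * event and page read paths on successive runs, then sleep until the
 * producer wakes us or asks us to finish.
 */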
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !kill_test) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;

				if (stat == EVENT_FOUND)
					found = 1;

			}
		}

		/* Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

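/*
 * Producer main loop: hammer the buffer with small events for RUN_TIME
 * seconds, periodically waking the consumer, then stop the reader and
 * report throughput statistics via trace_printk().
 */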
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
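	/*
	 * Each pass reserves write_iteration events of 10 bytes; the payload
	 * is the writing CPU id so the readers can verify what they consume.
	 */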
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
		if (kthread_should_stop())
			kill_test = 1;

	} while (ktime_before(end_time, timeout) && !kill_test);
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test && !kthread_should_stop())
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

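	/*
	 * Report the results: "Hit" counts successful reserve/commit cycles,
	 * "Missed" counts failed reserves, "Entries" are events still unread
	 * in the buffer and "Overruns" are events lost to overwrite; "Total"
	 * sums entries, overruns and the events the reader consumed.
	 */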
	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

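/*
 * Park the current thread until kthread_stop() is called, so that module
 * unload can still join a thread whose test run has already ended.
 */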
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

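/*
 * Consumer thread: signal read_start so the producer knows we are running,
 * consume until told to finish, then sleep until the next run or until we
 * are stopped.
 */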
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

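/*
 * Producer thread: reset the buffer, wait for the consumer to check in via
 * read_start, run one producer pass, then sleep SLEEP_TIME seconds before
 * the next round.
 */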
static int ring_buffer_producer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (kill_test)
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

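/*
 * Module init: allocate the buffer, create the consumer (unless the reader
 * is disabled), start the producer, then apply the requested scheduling
 * policy to both threads. The consumer is only created here; the producer
 * wakes it up once the buffer has been reset.
 */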
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

out_kill:
	if (consumer)
		kthread_stop(consumer);

out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");