[PATCH] rcu: Mention rcu_bh in description of rcutorture's torture_type parameter
File: kernel/rcutorture.c (from linux-2.6-block.git)
/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>	/* duplicate #include <linux/module.h> removed */
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
49MODULE_LICENSE("GPL");
ff2c93a5 50MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
a241ec65 51
4802211c 52static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
d84f5203 53static int stat_interval; /* Interval between stats, in seconds. */
a241ec65 54 /* Defaults to "only at end of test". */
d84f5203
SV
55static int verbose; /* Print more debug info. */
56static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
57static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
3c29e03d 58static char *torture_type = "rcu"; /* What to torture: rcu, rcu_bh, srcu. */
a241ec65 59
8d3b33f6 60module_param(nreaders, int, 0);
a241ec65 61MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
8d3b33f6 62module_param(stat_interval, int, 0);
a241ec65 63MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
8d3b33f6 64module_param(verbose, bool, 0);
a241ec65 65MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
8d3b33f6 66module_param(test_no_idle_hz, bool, 0);
d84f5203 67MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
8d3b33f6 68module_param(shuffle_interval, int, 0);
d84f5203 69MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
72e9bb54 70module_param(torture_type, charp, 0);
b2896d2e 71MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
72e9bb54
PM
72
73#define TORTURE_FLAG "-torture:"
a241ec65 74#define PRINTK_STRING(s) \
72e9bb54 75 do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
a241ec65 76#define VERBOSE_PRINTK_STRING(s) \
72e9bb54 77 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
a241ec65 78#define VERBOSE_PRINTK_ERRSTRING(s) \
72e9bb54 79 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
a241ec65
PM
80
81static char printk_buf[4096];
82
83static int nrealreaders;
84static struct task_struct *writer_task;
85static struct task_struct **reader_tasks;
86static struct task_struct *stats_task;
d84f5203 87static struct task_struct *shuffler_task;
a241ec65
PM
88
89#define RCU_TORTURE_PIPE_LEN 10
90
91struct rcu_torture {
92 struct rcu_head rtort_rcu;
93 int rtort_pipe_count;
94 struct list_head rtort_free;
996417d2 95 int rtort_mbtest;
a241ec65
PM
96};
97
98static int fullstop = 0; /* stop generating callbacks at test end. */
99static LIST_HEAD(rcu_torture_freelist);
100static struct rcu_torture *rcu_torture_current = NULL;
101static long rcu_torture_current_version = 0;
102static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
103static DEFINE_SPINLOCK(rcu_torture_lock);
104static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
105 { 0 };
106static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
107 { 0 };
108static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
b2896d2e
PM
109static atomic_t n_rcu_torture_alloc;
110static atomic_t n_rcu_torture_alloc_fail;
111static atomic_t n_rcu_torture_free;
112static atomic_t n_rcu_torture_mberror;
113static atomic_t n_rcu_torture_error;
a241ec65
PM
114
115/*
116 * Allocate an element from the rcu_tortures pool.
117 */
97a41e26 118static struct rcu_torture *
a241ec65
PM
119rcu_torture_alloc(void)
120{
121 struct list_head *p;
122
adac1665 123 spin_lock_bh(&rcu_torture_lock);
a241ec65
PM
124 if (list_empty(&rcu_torture_freelist)) {
125 atomic_inc(&n_rcu_torture_alloc_fail);
adac1665 126 spin_unlock_bh(&rcu_torture_lock);
a241ec65
PM
127 return NULL;
128 }
129 atomic_inc(&n_rcu_torture_alloc);
130 p = rcu_torture_freelist.next;
131 list_del_init(p);
adac1665 132 spin_unlock_bh(&rcu_torture_lock);
a241ec65
PM
133 return container_of(p, struct rcu_torture, rtort_free);
134}
135
136/*
137 * Free an element to the rcu_tortures pool.
138 */
139static void
140rcu_torture_free(struct rcu_torture *p)
141{
142 atomic_inc(&n_rcu_torture_free);
adac1665 143 spin_lock_bh(&rcu_torture_lock);
a241ec65 144 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
adac1665 145 spin_unlock_bh(&rcu_torture_lock);
a241ec65
PM
146}
struct rcu_random_state {
	unsigned long rrs_state;	/* linear-congruential generator state */
	long rrs_count;			/* draws remaining until entropy refresh;
					 * must be signed: tested with "< 0" below.
					 * (Was unsigned long, which made the
					 * refresh branch unreachable.) */
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static long
rcu_random(struct rcu_random_state *rrsp)
{
	long refresh;

	if (--rrsp->rrs_count < 0) {
		/* Periodically mix in real entropy so streams diverge. */
		get_random_bytes(&refresh, sizeof(refresh));
		rrsp->rrs_state += refresh;
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
176
72e9bb54
PM
177/*
178 * Operations vector for selecting different types of tests.
179 */
180
181struct rcu_torture_ops {
182 void (*init)(void);
183 void (*cleanup)(void);
184 int (*readlock)(void);
b2896d2e 185 void (*readdelay)(struct rcu_random_state *rrsp);
72e9bb54
PM
186 void (*readunlock)(int idx);
187 int (*completed)(void);
188 void (*deferredfree)(struct rcu_torture *p);
189 int (*stats)(char *page);
190 char *name;
191};
192static struct rcu_torture_ops *cur_ops = NULL;
193
194/*
195 * Definitions for rcu torture testing.
196 */
197
a49a4af7 198static int rcu_torture_read_lock(void) __acquires(RCU)
72e9bb54
PM
199{
200 rcu_read_lock();
201 return 0;
202}
203
b2896d2e
PM
204static void rcu_read_delay(struct rcu_random_state *rrsp)
205{
206 long delay;
207 const long longdelay = 200;
208
209 /* We want there to be long-running readers, but not all the time. */
210
211 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
212 if (!delay)
213 udelay(longdelay);
214}
215
a49a4af7 216static void rcu_torture_read_unlock(int idx) __releases(RCU)
72e9bb54
PM
217{
218 rcu_read_unlock();
219}
220
221static int rcu_torture_completed(void)
222{
223 return rcu_batches_completed();
224}
225
226static void
227rcu_torture_cb(struct rcu_head *p)
228{
229 int i;
230 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
231
232 if (fullstop) {
233 /* Test is ending, just drop callbacks on the floor. */
234 /* The next initialization will pick up the pieces. */
235 return;
236 }
237 i = rp->rtort_pipe_count;
238 if (i > RCU_TORTURE_PIPE_LEN)
239 i = RCU_TORTURE_PIPE_LEN;
240 atomic_inc(&rcu_torture_wcount[i]);
241 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
242 rp->rtort_mbtest = 0;
243 rcu_torture_free(rp);
244 } else
245 cur_ops->deferredfree(rp);
246}
247
248static void rcu_torture_deferred_free(struct rcu_torture *p)
249{
250 call_rcu(&p->rtort_rcu, rcu_torture_cb);
251}
252
253static struct rcu_torture_ops rcu_ops = {
254 .init = NULL,
255 .cleanup = NULL,
256 .readlock = rcu_torture_read_lock,
b2896d2e 257 .readdelay = rcu_read_delay,
72e9bb54
PM
258 .readunlock = rcu_torture_read_unlock,
259 .completed = rcu_torture_completed,
260 .deferredfree = rcu_torture_deferred_free,
261 .stats = NULL,
262 .name = "rcu"
263};
264
c32e0660
PM
265/*
266 * Definitions for rcu_bh torture testing.
267 */
268
a49a4af7 269static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
c32e0660
PM
270{
271 rcu_read_lock_bh();
272 return 0;
273}
274
a49a4af7 275static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
c32e0660
PM
276{
277 rcu_read_unlock_bh();
278}
279
280static int rcu_bh_torture_completed(void)
281{
282 return rcu_batches_completed_bh();
283}
284
285static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
286{
287 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
288}
289
290static struct rcu_torture_ops rcu_bh_ops = {
291 .init = NULL,
292 .cleanup = NULL,
293 .readlock = rcu_bh_torture_read_lock,
b2896d2e 294 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
c32e0660
PM
295 .readunlock = rcu_bh_torture_read_unlock,
296 .completed = rcu_bh_torture_completed,
297 .deferredfree = rcu_bh_torture_deferred_free,
298 .stats = NULL,
299 .name = "rcu_bh"
300};
301
b2896d2e
PM
302/*
303 * Definitions for srcu torture testing.
304 */
305
306static struct srcu_struct srcu_ctl;
307static struct list_head srcu_removed;
308
309static void srcu_torture_init(void)
310{
311 init_srcu_struct(&srcu_ctl);
312 INIT_LIST_HEAD(&srcu_removed);
313}
314
315static void srcu_torture_cleanup(void)
316{
317 synchronize_srcu(&srcu_ctl);
318 cleanup_srcu_struct(&srcu_ctl);
319}
320
321static int srcu_torture_read_lock(void)
322{
323 return srcu_read_lock(&srcu_ctl);
324}
325
326static void srcu_read_delay(struct rcu_random_state *rrsp)
327{
328 long delay;
329 const long uspertick = 1000000 / HZ;
330 const long longdelay = 10;
331
332 /* We want there to be long-running readers, but not all the time. */
333
334 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
335 if (!delay)
336 schedule_timeout_interruptible(longdelay);
337}
338
339static void srcu_torture_read_unlock(int idx)
340{
341 srcu_read_unlock(&srcu_ctl, idx);
342}
343
344static int srcu_torture_completed(void)
345{
346 return srcu_batches_completed(&srcu_ctl);
347}
348
349static void srcu_torture_deferred_free(struct rcu_torture *p)
350{
351 int i;
352 struct rcu_torture *rp;
353 struct rcu_torture *rp1;
354
355 synchronize_srcu(&srcu_ctl);
356 list_add(&p->rtort_free, &srcu_removed);
357 list_for_each_entry_safe(rp, rp1, &srcu_removed, rtort_free) {
358 i = rp->rtort_pipe_count;
359 if (i > RCU_TORTURE_PIPE_LEN)
360 i = RCU_TORTURE_PIPE_LEN;
361 atomic_inc(&rcu_torture_wcount[i]);
362 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
363 rp->rtort_mbtest = 0;
364 list_del(&rp->rtort_free);
365 rcu_torture_free(rp);
366 }
367 }
368}
369
370static int srcu_torture_stats(char *page)
371{
372 int cnt = 0;
373 int cpu;
374 int idx = srcu_ctl.completed & 0x1;
375
376 cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
377 torture_type, TORTURE_FLAG, idx);
378 for_each_possible_cpu(cpu) {
379 cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
380 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
381 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
382 }
383 cnt += sprintf(&page[cnt], "\n");
384 return cnt;
385}
386
387static struct rcu_torture_ops srcu_ops = {
388 .init = srcu_torture_init,
389 .cleanup = srcu_torture_cleanup,
390 .readlock = srcu_torture_read_lock,
391 .readdelay = srcu_read_delay,
392 .readunlock = srcu_torture_read_unlock,
393 .completed = srcu_torture_completed,
394 .deferredfree = srcu_torture_deferred_free,
395 .stats = srcu_torture_stats,
396 .name = "srcu"
397};
398
72e9bb54 399static struct rcu_torture_ops *torture_ops[] =
b2896d2e 400 { &rcu_ops, &rcu_bh_ops, &srcu_ops, NULL };
72e9bb54 401
a241ec65
PM
402/*
403 * RCU torture writer kthread. Repeatedly substitutes a new structure
404 * for that pointed to by rcu_torture_current, freeing the old structure
405 * after a series of grace periods (the "pipeline").
406 */
407static int
408rcu_torture_writer(void *arg)
409{
410 int i;
411 long oldbatch = rcu_batches_completed();
412 struct rcu_torture *rp;
413 struct rcu_torture *old_rp;
414 static DEFINE_RCU_RANDOM(rand);
415
416 VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
dbdf65b1
IM
417 set_user_nice(current, 19);
418
a241ec65
PM
419 do {
420 schedule_timeout_uninterruptible(1);
a241ec65
PM
421 if ((rp = rcu_torture_alloc()) == NULL)
422 continue;
423 rp->rtort_pipe_count = 0;
424 udelay(rcu_random(&rand) & 0x3ff);
425 old_rp = rcu_torture_current;
996417d2 426 rp->rtort_mbtest = 1;
a241ec65
PM
427 rcu_assign_pointer(rcu_torture_current, rp);
428 smp_wmb();
429 if (old_rp != NULL) {
430 i = old_rp->rtort_pipe_count;
431 if (i > RCU_TORTURE_PIPE_LEN)
432 i = RCU_TORTURE_PIPE_LEN;
433 atomic_inc(&rcu_torture_wcount[i]);
434 old_rp->rtort_pipe_count++;
72e9bb54 435 cur_ops->deferredfree(old_rp);
a241ec65
PM
436 }
437 rcu_torture_current_version++;
72e9bb54 438 oldbatch = cur_ops->completed();
a241ec65
PM
439 } while (!kthread_should_stop() && !fullstop);
440 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
441 while (!kthread_should_stop())
442 schedule_timeout_uninterruptible(1);
443 return 0;
444}
445
446/*
447 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
448 * incrementing the corresponding element of the pipeline array. The
449 * counter in the element should never be greater than 1, otherwise, the
450 * RCU implementation is broken.
451 */
452static int
453rcu_torture_reader(void *arg)
454{
455 int completed;
72e9bb54 456 int idx;
a241ec65
PM
457 DEFINE_RCU_RANDOM(rand);
458 struct rcu_torture *p;
459 int pipe_count;
460
461 VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
dbdf65b1
IM
462 set_user_nice(current, 19);
463
a241ec65 464 do {
72e9bb54
PM
465 idx = cur_ops->readlock();
466 completed = cur_ops->completed();
a241ec65
PM
467 p = rcu_dereference(rcu_torture_current);
468 if (p == NULL) {
469 /* Wait for rcu_torture_writer to get underway */
72e9bb54 470 cur_ops->readunlock(idx);
a241ec65
PM
471 schedule_timeout_interruptible(HZ);
472 continue;
473 }
996417d2
PM
474 if (p->rtort_mbtest == 0)
475 atomic_inc(&n_rcu_torture_mberror);
b2896d2e 476 cur_ops->readdelay(&rand);
a241ec65
PM
477 preempt_disable();
478 pipe_count = p->rtort_pipe_count;
479 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
480 /* Should not happen, but... */
481 pipe_count = RCU_TORTURE_PIPE_LEN;
482 }
483 ++__get_cpu_var(rcu_torture_count)[pipe_count];
72e9bb54 484 completed = cur_ops->completed() - completed;
a241ec65
PM
485 if (completed > RCU_TORTURE_PIPE_LEN) {
486 /* Should not happen, but... */
487 completed = RCU_TORTURE_PIPE_LEN;
488 }
489 ++__get_cpu_var(rcu_torture_batch)[completed];
490 preempt_enable();
72e9bb54 491 cur_ops->readunlock(idx);
a241ec65
PM
492 schedule();
493 } while (!kthread_should_stop() && !fullstop);
494 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
495 while (!kthread_should_stop())
496 schedule_timeout_uninterruptible(1);
497 return 0;
498}
499
500/*
501 * Create an RCU-torture statistics message in the specified buffer.
502 */
503static int
504rcu_torture_printk(char *page)
505{
506 int cnt = 0;
507 int cpu;
508 int i;
509 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
510 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
511
0a945022 512 for_each_possible_cpu(cpu) {
a241ec65
PM
513 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
514 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
515 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
516 }
517 }
518 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
519 if (pipesummary[i] != 0)
520 break;
521 }
72e9bb54 522 cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
a241ec65 523 cnt += sprintf(&page[cnt],
996417d2
PM
524 "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
525 "rtmbe: %d",
a241ec65
PM
526 rcu_torture_current,
527 rcu_torture_current_version,
528 list_empty(&rcu_torture_freelist),
529 atomic_read(&n_rcu_torture_alloc),
530 atomic_read(&n_rcu_torture_alloc_fail),
996417d2
PM
531 atomic_read(&n_rcu_torture_free),
532 atomic_read(&n_rcu_torture_mberror));
533 if (atomic_read(&n_rcu_torture_mberror) != 0)
534 cnt += sprintf(&page[cnt], " !!!");
72e9bb54 535 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
996417d2 536 if (i > 1) {
a241ec65 537 cnt += sprintf(&page[cnt], "!!! ");
996417d2
PM
538 atomic_inc(&n_rcu_torture_error);
539 }
a241ec65
PM
540 cnt += sprintf(&page[cnt], "Reader Pipe: ");
541 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
542 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
72e9bb54 543 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
a241ec65 544 cnt += sprintf(&page[cnt], "Reader Batch: ");
72e9bb54 545 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
a241ec65 546 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
72e9bb54 547 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
a241ec65
PM
548 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
549 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
550 cnt += sprintf(&page[cnt], " %d",
551 atomic_read(&rcu_torture_wcount[i]));
552 }
553 cnt += sprintf(&page[cnt], "\n");
72e9bb54
PM
554 if (cur_ops->stats != NULL)
555 cnt += cur_ops->stats(&page[cnt]);
a241ec65
PM
556 return cnt;
557}
558
559/*
560 * Print torture statistics. Caller must ensure that there is only
561 * one call to this function at a given time!!! This is normally
562 * accomplished by relying on the module system to only have one copy
563 * of the module loaded, and then by giving the rcu_torture_stats
564 * kthread full control (or the init/cleanup functions when rcu_torture_stats
565 * thread is not running).
566 */
567static void
568rcu_torture_stats_print(void)
569{
570 int cnt;
571
572 cnt = rcu_torture_printk(printk_buf);
573 printk(KERN_ALERT "%s", printk_buf);
574}
575
576/*
577 * Periodically prints torture statistics, if periodic statistics printing
578 * was specified via the stat_interval module parameter.
579 *
580 * No need to worry about fullstop here, since this one doesn't reference
581 * volatile state or register callbacks.
582 */
583static int
584rcu_torture_stats(void *arg)
585{
586 VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
587 do {
588 schedule_timeout_interruptible(stat_interval * HZ);
589 rcu_torture_stats_print();
590 } while (!kthread_should_stop());
591 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
592 return 0;
593}
594
d84f5203
SV
595static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
596
597/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
598 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
599 */
b2896d2e 600static void rcu_torture_shuffle_tasks(void)
d84f5203
SV
601{
602 cpumask_t tmp_mask = CPU_MASK_ALL;
603 int i;
604
605 lock_cpu_hotplug();
606
607 /* No point in shuffling if there is only one online CPU (ex: UP) */
608 if (num_online_cpus() == 1) {
609 unlock_cpu_hotplug();
610 return;
611 }
612
613 if (rcu_idle_cpu != -1)
614 cpu_clear(rcu_idle_cpu, tmp_mask);
615
616 set_cpus_allowed(current, tmp_mask);
617
618 if (reader_tasks != NULL) {
619 for (i = 0; i < nrealreaders; i++)
620 if (reader_tasks[i])
621 set_cpus_allowed(reader_tasks[i], tmp_mask);
622 }
623
624 if (writer_task)
625 set_cpus_allowed(writer_task, tmp_mask);
626
627 if (stats_task)
628 set_cpus_allowed(stats_task, tmp_mask);
629
630 if (rcu_idle_cpu == -1)
631 rcu_idle_cpu = num_online_cpus() - 1;
632 else
633 rcu_idle_cpu--;
634
635 unlock_cpu_hotplug();
636}
637
638/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
639 * system to become idle at a time and cut off its timer ticks. This is meant
640 * to test the support for such tickless idle CPU in RCU.
641 */
642static int
643rcu_torture_shuffle(void *arg)
644{
645 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
646 do {
647 schedule_timeout_interruptible(shuffle_interval * HZ);
648 rcu_torture_shuffle_tasks();
649 } while (!kthread_should_stop());
650 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
651 return 0;
652}
653
95c38322
PM
654static inline void
655rcu_torture_print_module_parms(char *tag)
656{
72e9bb54 657 printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d "
95c38322
PM
658 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
659 "shuffle_interval = %d\n",
72e9bb54
PM
660 torture_type, tag, nrealreaders, stat_interval, verbose,
661 test_no_idle_hz, shuffle_interval);
95c38322
PM
662}
663
a241ec65
PM
664static void
665rcu_torture_cleanup(void)
666{
667 int i;
668
669 fullstop = 1;
d84f5203
SV
670 if (shuffler_task != NULL) {
671 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
672 kthread_stop(shuffler_task);
673 }
674 shuffler_task = NULL;
675
a241ec65
PM
676 if (writer_task != NULL) {
677 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
678 kthread_stop(writer_task);
679 }
680 writer_task = NULL;
681
682 if (reader_tasks != NULL) {
683 for (i = 0; i < nrealreaders; i++) {
684 if (reader_tasks[i] != NULL) {
685 VERBOSE_PRINTK_STRING(
686 "Stopping rcu_torture_reader task");
687 kthread_stop(reader_tasks[i]);
688 }
689 reader_tasks[i] = NULL;
690 }
691 kfree(reader_tasks);
692 reader_tasks = NULL;
693 }
694 rcu_torture_current = NULL;
695
696 if (stats_task != NULL) {
697 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
698 kthread_stop(stats_task);
699 }
700 stats_task = NULL;
701
702 /* Wait for all RCU callbacks to fire. */
89d46b87 703 rcu_barrier();
a241ec65 704
a241ec65 705 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
72e9bb54
PM
706
707 if (cur_ops->cleanup != NULL)
708 cur_ops->cleanup();
95c38322
PM
709 if (atomic_read(&n_rcu_torture_error))
710 rcu_torture_print_module_parms("End of test: FAILURE");
711 else
712 rcu_torture_print_module_parms("End of test: SUCCESS");
a241ec65
PM
713}
714
715static int
716rcu_torture_init(void)
717{
718 int i;
719 int cpu;
720 int firsterr = 0;
721
722 /* Process args and tell the world that the torturer is on the job. */
723
72e9bb54
PM
724 for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
725 cur_ops = torture_ops[i];
726 if (strcmp(torture_type, cur_ops->name) == 0) {
727 break;
728 }
729 }
730 if (cur_ops == NULL) {
731 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
732 torture_type);
733 return (-EINVAL);
734 }
735 if (cur_ops->init != NULL)
736 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
737
a241ec65
PM
738 if (nreaders >= 0)
739 nrealreaders = nreaders;
740 else
741 nrealreaders = 2 * num_online_cpus();
95c38322 742 rcu_torture_print_module_parms("Start of test");
a241ec65
PM
743 fullstop = 0;
744
745 /* Set up the freelist. */
746
747 INIT_LIST_HEAD(&rcu_torture_freelist);
748 for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
996417d2 749 rcu_tortures[i].rtort_mbtest = 0;
a241ec65
PM
750 list_add_tail(&rcu_tortures[i].rtort_free,
751 &rcu_torture_freelist);
752 }
753
754 /* Initialize the statistics so that each run gets its own numbers. */
755
756 rcu_torture_current = NULL;
757 rcu_torture_current_version = 0;
758 atomic_set(&n_rcu_torture_alloc, 0);
759 atomic_set(&n_rcu_torture_alloc_fail, 0);
760 atomic_set(&n_rcu_torture_free, 0);
996417d2
PM
761 atomic_set(&n_rcu_torture_mberror, 0);
762 atomic_set(&n_rcu_torture_error, 0);
a241ec65
PM
763 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
764 atomic_set(&rcu_torture_wcount[i], 0);
0a945022 765 for_each_possible_cpu(cpu) {
a241ec65
PM
766 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
767 per_cpu(rcu_torture_count, cpu)[i] = 0;
768 per_cpu(rcu_torture_batch, cpu)[i] = 0;
769 }
770 }
771
772 /* Start up the kthreads. */
773
774 VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
775 writer_task = kthread_run(rcu_torture_writer, NULL,
776 "rcu_torture_writer");
777 if (IS_ERR(writer_task)) {
778 firsterr = PTR_ERR(writer_task);
779 VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
780 writer_task = NULL;
781 goto unwind;
782 }
783 reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]),
784 GFP_KERNEL);
785 if (reader_tasks == NULL) {
786 VERBOSE_PRINTK_ERRSTRING("out of memory");
787 firsterr = -ENOMEM;
788 goto unwind;
789 }
790 for (i = 0; i < nrealreaders; i++) {
791 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
792 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
793 "rcu_torture_reader");
794 if (IS_ERR(reader_tasks[i])) {
795 firsterr = PTR_ERR(reader_tasks[i]);
796 VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
797 reader_tasks[i] = NULL;
798 goto unwind;
799 }
800 }
801 if (stat_interval > 0) {
802 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
803 stats_task = kthread_run(rcu_torture_stats, NULL,
804 "rcu_torture_stats");
805 if (IS_ERR(stats_task)) {
806 firsterr = PTR_ERR(stats_task);
807 VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
808 stats_task = NULL;
809 goto unwind;
810 }
811 }
d84f5203
SV
812 if (test_no_idle_hz) {
813 rcu_idle_cpu = num_online_cpus() - 1;
814 /* Create the shuffler thread */
815 shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
816 "rcu_torture_shuffle");
817 if (IS_ERR(shuffler_task)) {
818 firsterr = PTR_ERR(shuffler_task);
819 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
820 shuffler_task = NULL;
821 goto unwind;
822 }
823 }
a241ec65
PM
824 return 0;
825
826unwind:
827 rcu_torture_cleanup();
828 return firsterr;
829}
830
831module_init(rcu_torture_init);
832module_exit(rcu_torture_cleanup);