torture: Add scftorture to the rcutorture scripting
[linux-block.git] / kernel / scftorture.c
CommitLineData
e9d338a0
PM
1// SPDX-License-Identifier: GPL-2.0+
2//
3// Torture test for smp_call_function() and friends.
4//
5// Copyright (C) Facebook, 2020.
6//
7// Author: Paul E. McKenney <paulmck@kernel.org>
8
9#define pr_fmt(fmt) fmt
10
11#include <linux/atomic.h>
12#include <linux/bitops.h>
13#include <linux/completion.h>
14#include <linux/cpu.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/kthread.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/notifier.h>
25#include <linux/percpu.h>
26#include <linux/rcupdate.h>
27#include <linux/rcupdate_trace.h>
28#include <linux/reboot.h>
29#include <linux/sched.h>
30#include <linux/spinlock.h>
31#include <linux/smp.h>
32#include <linux/stat.h>
33#include <linux/srcu.h>
34#include <linux/slab.h>
35#include <linux/torture.h>
36#include <linux/types.h>
37
#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

// Unconditionally print a module-tagged message.
#define SCFTORTOUT(s, x...) \
	pr_alert(SCFTORT_FLAG s, ## x)

// Print a module-tagged message only when the "verbose" parameter is set.
#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s, ## x); } while (0)

// Verbose error message, prefixed with "!!!" for easy grepping of logs.
#define VERBOSE_SCFTORTOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG "!!! " s, ## x); } while (0)
49
50MODULE_LICENSE("GPL");
51MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
52
53// Wait until there are multiple CPUs before starting test.
54torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
55 "Holdoff time before test start (s)");
56torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
57torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
58torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
59torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
60torture_param(int, shutdown_secs, 0, "Shutdown time (ms), <= zero to disable.");
61torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
62torture_param(int, stutter_cpus, 5, "Number of jiffies to change CPUs under test, 0=disable");
63torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
64torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
65torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
66torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
67torture_param(int, weight_mult, -1, "Testing weight for multi-CPU no-wait operations.");
68torture_param(int, weight_mult_wait, -1, "Testing weight for multi-CPU operations.");
69torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
70torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU operations.");
71
// Torture-framework type string; empty for this test.
// NOTE(review): not static, so this symbol is visible kernel-wide --
// consider making it static unless some other file declares it extern
// (sparse would flag "symbol ... was not declared"); verify.
char *torture_type = "";

// Default to shutting the machine down at end of test when built in
// (typical automated-test usage), but not when loaded as a module.
#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");
81
// Per-invoker-kthread statistics.  Only n_all is updated by the code
// visible in this file (scftorture_invoke_one()); the remaining counters
// appear to be reserved for the other smp_call_function*() variants --
// TODO confirm once those variants are implemented.
struct scf_statistics {
	struct task_struct *task;	// The invoker kthread itself.
	int cpu;			// CPU this kthread is bound to (modulo nr_cpu_ids).
	long long n_single;
	long long n_single_wait;
	long long n_multi;
	long long n_multi_wait;
	long long n_all;		// Count of no-wait smp_call_function() calls.
	long long n_all_wait;
};
92
static struct scf_statistics *scf_stats_p;		// Array of nthreads elements.
static struct task_struct *scf_torture_stats_task;	// Statistics-printing kthread.
static DEFINE_PER_CPU(long long, scf_invoked_count);	// Per-CPU handler-invocation count.

// Use to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;		// Consulted at cleanup time to decide pass/fail.
static bool scfdone;		// Set during cleanup; flips stats tag "ver" -> "VER".

// NOTE(review): not static, so this per-CPU symbol is visible kernel-wide;
// consider making it static (sparse would flag it) -- verify no external users.
DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
103
104// Print torture statistics. Caller must ensure serialization.
105static void scf_torture_stats_print(void)
106{
107 int cpu;
108 long long invoked_count = 0;
109 bool isdone = READ_ONCE(scfdone);
110
111 for_each_possible_cpu(cpu)
112 invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
113 pr_alert("%s scf_invoked_count %s: %lld ",
114 SCFTORT_FLAG, isdone ? "VER" : "ver", invoked_count);
115 torture_onoff_stats();
116 pr_cont("\n");
117}
118
119// Periodically prints torture statistics, if periodic statistics printing
120// was specified via the stat_interval module parameter.
121static int
122scf_torture_stats(void *arg)
123{
124 VERBOSE_TOROUT_STRING("scf_torture_stats task started");
125 do {
126 schedule_timeout_interruptible(stat_interval * HZ);
127 scf_torture_stats_print();
128 torture_shutdown_absorb("scf_torture_stats");
129 } while (!torture_must_stop());
130 torture_kthread_stopping("scf_torture_stats");
131 return 0;
132}
133
134// Update statistics and occasionally burn up mass quantities of CPU time,
135// if told to do so via scftorture.longwait. Otherwise, occasionally burn
136// a little bit.
137static void scf_handler(void *unused)
138{
139 int i;
140 int j;
141 unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
142
143 this_cpu_inc(scf_invoked_count);
144 if (longwait <= 0) {
145 if (!(r & 0xffc0))
146 udelay(r & 0x3f);
147 return;
148 }
149 if (r & 0xfff)
150 return;
151 r = (r >> 12);
152 if (longwait <= 0) {
153 udelay((r & 0xff) + 1);
154 return;
155 }
156 r = r % longwait + 1;
157 for (i = 0; i < r; i++) {
158 for (j = 0; j < 1000; j++) {
159 udelay(1000);
160 cpu_relax();
161 }
162 }
163}
164
165// Randomly do an smp_call_function*() invocation.
166static void scftorture_invoke_one(struct scf_statistics *scfp,struct torture_random_state *trsp)
167{
168 if (use_cpus_read_lock)
169 cpus_read_lock();
170 else
171 preempt_disable();
172 scfp->n_all++;
173 smp_call_function(scf_handler, NULL, 0);
174 if (use_cpus_read_lock)
175 cpus_read_unlock();
176 else
177 preempt_enable();
178 if (!(torture_random(trsp) & 0xfff))
179 schedule_timeout_uninterruptible(1);
180}
181
182// SCF test kthread. Repeatedly does calls to members of the
183// smp_call_function() family of functions.
184static int scftorture_invoker(void *arg)
185{
186 DEFINE_TORTURE_RANDOM(rand);
187 struct scf_statistics *scfp = (struct scf_statistics *)arg;
188
189 VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
190 set_cpus_allowed_ptr(current, cpumask_of(scfp->cpu % nr_cpu_ids));
191 set_user_nice(current, MAX_NICE);
192 if (holdoff)
193 schedule_timeout_interruptible(holdoff * HZ);
194
195 VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
196
197 // Make sure that the CPU is affinitized appropriately during testing.
198 WARN_ON_ONCE(smp_processor_id() != scfp->cpu);
199
200 if (!atomic_dec_return(&n_started))
201 while (atomic_read_acquire(&n_started)) {
202 if (torture_must_stop()) {
203 VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
204 goto end;
205 }
206 schedule_timeout_uninterruptible(1);
207 }
208
209 VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);
210
211 do {
212 scftorture_invoke_one(scfp, &rand);
213 } while (!torture_must_stop());
214
215 VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
216end:
217 torture_kthread_stopping("scftorture_invoker");
218 return 0;
219}
220
221static void
222scftorture_print_module_parms(const char *tag)
223{
224 pr_alert(SCFTORT_FLAG
225 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter_cpus=%d use_cpus_read_lock=%d, weight_single=%d, weight_single_wait=%d, weight_mult=%d, weight_mult_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
226 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter_cpus, use_cpus_read_lock, weight_single, weight_single_wait, weight_mult, weight_mult_wait, weight_all, weight_all_wait);
227}
228
// Empty handler run on all CPUs by scf_torture_cleanup(), presumably to
// flush any still-queued scf_handler() callbacks before teardown proceeds
// -- NOTE(review): confirm against smp_call_function() queueing semantics,
// since the cleanup call passes wait=0.
static void scf_cleanup_handler(void *unused)
{
}
232
// Tear down the test: stop all invoker kthreads and the stats kthread,
// print the final statistics, and report overall success or failure.
// Invoked from module exit, from the init-failure unwind path, and via
// torture_shutdown_init().
static void scf_torture_cleanup(void)
{
	int i;

	// Bail if cleanup is already in progress (torture framework arbitration).
	if (torture_cleanup_begin())
		return;

	// Flip the stats tag from "ver" to "VER" for the final printout.
	WRITE_ONCE(scfdone, true);
	if (nthreads)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;	// NOTE(review): also skips stopping the stats
				// kthread -- confirm nthreads cannot be 0 when
				// that kthread exists.
	kfree(scf_stats_p);
	scf_stats_p = NULL;
	// Run the empty handler everywhere, presumably to drain pending
	// scf_handler() callbacks -- TODO confirm (wait=0 here).
	smp_call_function(scf_cleanup_handler, NULL, 0);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print();	// -After- the stats thread is stopped!

	// Any handler-detected error trumps a hotplug failure in the verdict.
	if (atomic_read(&n_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}
262
// Module-init function: normalize the weight parameters, set up optional
// CPU-hotplug and shutdown testing, then spawn the invoker kthreads and
// (optionally) the statistics kthread.  On any failure, unwinds through
// scf_torture_cleanup() and returns the first error encountered.
static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	// If no weights were specified at all, weight everything equally;
	// otherwise treat each unspecified weight as zero (disabled).
	// NOTE(review): the weights are normalized and validated here but
	// scftorture_invoke_one() does not consult them yet.
	if (weight_single == -1 && weight_single_wait == -1 &&
	    weight_mult == -1 && weight_mult_wait == -1 &&
	    weight_all == -1 && weight_all_wait == -1) {
		weight_single = 1;
		weight_single_wait = 1;
		weight_mult = 1;
		weight_mult_wait = 1;
		weight_all = 1;
		weight_all_wait = 1;
	} else {
		if (weight_single == -1)
			weight_single = 0;
		if (weight_single_wait == -1)
			weight_single_wait = 0;
		if (weight_mult == -1)
			weight_mult = 0;
		if (weight_mult_wait == -1)
			weight_mult_wait = 0;
		if (weight_all == -1)
			weight_all = 0;
		if (weight_all_wait == -1)
			weight_all_wait = 0;
	}
	// All-zero weights would mean nothing to test; reject that.
	if (weight_single == 0 && weight_single_wait == 0 &&
	    weight_mult == 0 && weight_mult_wait == 0 &&
	    weight_all == 0 && weight_all_wait == 0) {
		firsterr = -EINVAL;
		goto unwind;
	}

	// Optional CPU-hotplug stress during the test.
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (firsterr)
			goto unwind;
	}
	// Optional timed shutdown; cleanup runs via the shutdown path.
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (firsterr)
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		VERBOSE_SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads\n", nthreads);

	// Pre-load the start-up count that scftorture_invoker() decrements.
	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (firsterr)
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	// torture_init_end() must precede cleanup so that
	// torture_cleanup_begin() can run.
	torture_init_end();
	scf_torture_cleanup();
	return firsterr;
}
348
// Register module entry and exit points.
module_init(scf_torture_init);
module_exit(scf_torture_cleanup);