/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, ...)");

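/*
 * Example invocation, assuming the test is built as the locktorture
 * kernel module (adjust the name to match your configuration):
 *
 *	modprobe locktorture torture_type=spin_lock_irq nwriters_stress=4 \
 *		stat_interval=30 verbose=1
 *
 * When built into the kernel instead, locktorture_runnable (see below)
 * determines whether the test starts running at boot.
 */
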
static atomic_t n_lock_torture_errors;

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;

static int nrealwriters_stress;
static bool lock_is_write_held;

struct lock_writer_stress_stats {
	long n_write_lock_fail;
	long n_write_lock_acquired;
};
static struct lock_writer_stress_stats *lwsa;

#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(locktorture_runnable, int, 0444);
MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	unsigned long flags;
	const char *name;
};

static struct lock_torture_ops *cur_ops;

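/*
 * Adding a new lock type amounts to supplying another instance of
 * struct lock_torture_ops and listing it in the torture_ops[] array in
 * lock_torture_init().  As a sketch only (not implemented in this file),
 * a mutex-based variant might look like:
 *
 *	static DEFINE_MUTEX(torture_mutex);
 *
 *	static int torture_mutex_write_lock(void)
 *	{
 *		mutex_lock(&torture_mutex);
 *		return 0;
 *	}
 *
 *	static void torture_mutex_write_unlock(void)
 *	{
 *		mutex_unlock(&torture_mutex);
 *	}
 *
 *	static struct lock_torture_ops mutex_lock_ops = {
 *		.writelock	= torture_mutex_write_lock,
 *		.write_delay	= torture_spin_lock_write_delay,
 *		.writeunlock	= torture_mutex_write_unlock,
 *		.name		= "mutex_lock"
 *	};
 */
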
/*
 * Definitions for lock torture testing.
 */

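/*
 * The "busted" variant below intentionally provides no mutual exclusion
 * whatsoever.  It exists to validate the test itself: running with
 * torture_type=lock_busted should trigger the writers' duplicate-acquisition
 * check and produce an "End of test: FAILURE" report.
 */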
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_us = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.writeunlock	= torture_lock_busted_write_unlock,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_us = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_spin_lock_write_unlock,
	.name		= "spin_lock"
};

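/*
 * The irqsave flags are stashed in cur_ops->flags rather than per writer.
 * This is safe because the store is performed while the lock is held and
 * the matching load in the unlock path also happens before the lock is
 * released, so the lock itself serializes access to the shared field.
 */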
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.name		= "spin_lock_irq"
};

/*
 * Lock torture writer kthread. Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_writer_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, 19);

	do {
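		/*
		 * Roughly once per 2^20 iterations, sleep for a jiffy,
		 * presumably so that a writer does not completely
		 * monopolize its CPU.
		 */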
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
		cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_write_lock_fail++;
		lock_is_write_held = 1;
		lwsp->n_write_lock_acquired++;
		cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cur_ops->writeunlock();
		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void lock_torture_printk(char *page)
{
	bool fail = false;
	int i;
	long max = 0;
	long min = lwsa[0].n_write_lock_acquired;
	long long sum = 0;

	for (i = 0; i < nrealwriters_stress; i++) {
		if (lwsa[i].n_write_lock_fail)
			fail = true;
		sum += lwsa[i].n_write_lock_acquired;
		if (max < lwsa[i].n_write_lock_acquired)
			max = lwsa[i].n_write_lock_acquired;
		if (min > lwsa[i].n_write_lock_acquired)
			min = lwsa[i].n_write_lock_acquired;
	}
	/* "???" flags suspiciously imbalanced per-writer acquisition counts. */
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
			"Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&n_lock_torture_errors);
}

/*
 * Print torture statistics. Caller must ensure that there is only one
 * call to this function at a given time!!! This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
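	/* Budget roughly 200 bytes of output per writer, plus slack. */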
	int size = nrealwriters_stress * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}
	lock_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealwriters_stress, stat_interval, verbose,
		 shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup())
		return;

	if (writer_tasks) {
		for (i = 0; i < nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_lock_torture_errors))
		lock_torture_print_module_parms(cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cur_ops,
						"End of test: SUCCESS");
}

static int __init lock_torture_init(void)
{
	int i;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
	};

	torture_init_begin(torture_type, verbose, &locktorture_runnable);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cur_ops->init)
		cur_ops->init();  /* no "goto unwind" prior to this point!!! */

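	/*
	 * A negative nwriters_stress (the default) asks for twice as many
	 * writer kthreads as there are online CPUs.
	 */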
	if (nwriters_stress >= 0)
		nrealwriters_stress = nwriters_stress;
	else
		nrealwriters_stress = 2 * num_online_cpus();
	lock_torture_print_module_parms(cur_ops, "Start of test");

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
	if (lwsa == NULL) {
		VERBOSE_TOROUT_STRING("lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		lwsa[i].n_write_lock_fail = 0;
		lwsa[i].n_write_lock_acquired = 0;
	}

	/* Start up the kthreads. */

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);