/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
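
/*
 * Illustrative usage (example only): the module parameters above are
 * normally supplied at module-load time.  A run torturing the in-kernel
 * mutex with four writer kthreads and one-minute statistics intervals
 * might look something like:
 *
 *	modprobe locktorture torture_type=mutex_lock nwriters_stress=4 \
 *		stat_interval=60 verbose=1
 *
 * The exact values are only a sketch; defaults come from the
 * torture_param() declarations above.
 */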

static atomic_t n_lock_torture_errors;

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;

static int nrealwriters_stress;
static bool lock_is_write_held;

struct lock_writer_stress_stats {
	long n_write_lock_fail;
	long n_write_lock_acquired;
};
static struct lock_writer_stress_stats *lwsa;

#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
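
/*
 * Illustrative note: torture_runnable is 0444, so it cannot be changed after
 * the test is loaded.  For a built-in configuration it would typically be
 * enabled from the boot command line using the standard <module>.<param>
 * syntax, e.g. "locktorture.torture_runnable=1"; this is only an example,
 * and the default is governed by LOCKTORTURE_RUNNABLE_INIT above.
 */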

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	unsigned long flags;
	const char *name;
};

static struct lock_torture_ops *cur_ops;

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_us = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.writeunlock = torture_lock_busted_write_unlock,
	.name = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_us = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.writeunlock = torture_spin_lock_write_unlock,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.name = "spin_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.writeunlock = torture_mutex_unlock,
	.name = "mutex_lock"
};

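/*
 * Illustrative sketch only: a new lock flavor is added by supplying another
 * lock_torture_ops instance that follows the same pattern as mutex_lock_ops
 * above.  The reader-writer-semaphore example below uses made-up names
 * (torture_rwsem, rwsem_lock_ops, ...), assumes <linux/rwsem.h> is available,
 * and reuses torture_mutex_delay for brevity.  To be selectable through the
 * torture_type parameter it would also need an entry in the torture_ops[]
 * array in lock_torture_init(), hence the __maybe_unused annotation.
 */
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops __maybe_unused = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_mutex_delay,
	.writeunlock = torture_rwsem_up_write,
	.name = "rwsem_lock"
};
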
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_writer_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
		cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_write_lock_fail++;
		lock_is_write_held = 1;
		lwsp->n_write_lock_acquired++;
		cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cur_ops->writeunlock();
		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void lock_torture_printk(char *page)
{
	bool fail = false;
	int i;
	long max = 0;
	long min = lwsa[0].n_write_lock_acquired;
	long long sum = 0;

	for (i = 0; i < nrealwriters_stress; i++) {
		if (lwsa[i].n_write_lock_fail)
			fail = true;
		sum += lwsa[i].n_write_lock_acquired;
		/* Max/min track per-writer acquisition counts, not failures. */
		if (max < lwsa[i].n_write_lock_acquired)
			max = lwsa[i].n_write_lock_acquired;
		if (min > lwsa[i].n_write_lock_acquired)
			min = lwsa[i].n_write_lock_acquired;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
			"Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when the lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = nrealwriters_stress * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
		       size);
		return;
	}
	lock_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealwriters_stress, stat_interval, verbose,
		 shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup())
		return;

	if (writer_tasks) {
		for (i = 0; i < nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_lock_torture_errors))
		lock_torture_print_module_parms(cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cur_ops,
						"End of test: SUCCESS");
}

static int __init lock_torture_init(void)
{
	int i;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops, &mutex_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cur_ops->init)
		cur_ops->init();  /* no "goto unwind" prior to this point!!! */

	if (nwriters_stress >= 0)
		nrealwriters_stress = nwriters_stress;
	else
		nrealwriters_stress = 2 * num_online_cpus();
	lock_torture_print_module_parms(cur_ops, "Start of test");

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
	if (lwsa == NULL) {
		VERBOSE_TOROUT_STRING("lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		lwsa[i].n_write_lock_fail = 0;
		lwsa[i].n_write_lock_acquired = 0;
	}

	/* Start up the kthreads. */

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);