blob: a595a6f2615ae36528e1b7d590a0888a170ce521 [file] [log] [blame]
/*
 * Linux Kernel Dump Test Module for testing kernel crashes conditions:
 * induces system failures at predefined crashpoints and under predefined
 * operational conditions in order to evaluate the reliability of kernel
 * sanity checking and crash dumps obtained using different dumping
 * solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <ankita@in.ibm.com>
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
Kees Cook426f3a52016-06-03 11:16:32 -070033#define pr_fmt(fmt) "lkdtm: " fmt
Ankita Garg8bb31b92006-10-02 02:17:36 -070034
35#include <linux/kernel.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080036#include <linux/fs.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070037#include <linux/module.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080038#include <linux/buffer_head.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070039#include <linux/kprobes.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080040#include <linux/list.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070041#include <linux/init.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070042#include <linux/interrupt.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080043#include <linux/hrtimer.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090044#include <linux/slab.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070045#include <scsi/scsi_cmnd.h>
Simon Kagstrom0347af42010-03-05 13:42:49 -080046#include <linux/debugfs.h>
Kees Cookcc33c5372013-07-08 10:01:33 -070047#include <linux/vmalloc.h>
Kees Cook9ae113c2013-10-24 09:25:57 -070048#include <linux/mman.h>
Kees Cook1bc9fac2014-02-14 15:58:50 -080049#include <asm/cacheflush.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070050
51#ifdef CONFIG_IDE
52#include <linux/ide.h>
53#endif
54
Kees Cook9a49a522016-02-22 14:09:29 -080055#include "lkdtm.h"
56
/*
 * Make sure our attempts to over run the kernel stack doesn't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
/* Enough REC_STACK_SIZE-sized frames to run twice past THREAD_SIZE. */
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

/* Default number of times a crash point must be hit before it triggers. */
#define DEFAULT_COUNT 10
/* Bytes copied/executed by the EXEC_* tests (size of the probe region). */
#define EXEC_SIZE 64
Ankita Garg8bb31b92006-10-02 02:17:36 -070071
/*
 * Crash points: kernel code paths that can be hooked (via the jprobe
 * below) so that reaching them triggers the configured crash action.
 * Order must match cp_name[]: entry i of that table corresponds to
 * enum value i + 1 (CN_INVALID == 0 has no table entry).
 * NOTE(review): CN_DIRECT presumably means "trigger directly, no
 * probe" — the registration code is outside this view; confirm there.
 */
enum cname {
	CN_INVALID,
	CN_INT_HARDWARE_ENTRY,
	CN_INT_HW_IRQ_EN,
	CN_INT_TASKLET_ENTRY,
	CN_FS_DEVRW,
	CN_MEM_SWAPOUT,
	CN_TIMERADD,
	CN_SCSI_DISPATCH_CMD,
	CN_IDE_CORE_CP,
	CN_DIRECT,
};
84
/*
 * Crash types: the action performed when a crash point fires (see
 * lkdtm_do_action()). Order must match cp_type[]: entry i of that
 * table corresponds to enum value i + 1 (CT_NONE == 0 has no entry).
 */
enum ctype {
	CT_NONE,
	CT_PANIC,
	CT_BUG,
	CT_WARNING,
	CT_EXCEPTION,
	CT_LOOP,
	CT_OVERFLOW,
	CT_CORRUPT_STACK,
	CT_UNALIGNED_LOAD_STORE_WRITE,
	CT_OVERWRITE_ALLOCATION,
	CT_WRITE_AFTER_FREE,
	CT_READ_AFTER_FREE,
	CT_WRITE_BUDDY_AFTER_FREE,
	CT_READ_BUDDY_AFTER_FREE,
	CT_SOFTLOCKUP,
	CT_HARDLOCKUP,
	CT_SPINLOCKUP,
	CT_HUNG_TASK,
	/* Attempts to execute code from non-code memory regions. */
	CT_EXEC_DATA,
	CT_EXEC_STACK,
	CT_EXEC_KMALLOC,
	CT_EXEC_VMALLOC,
	CT_EXEC_RODATA,
	CT_EXEC_USERSPACE,
	CT_ACCESS_USERSPACE,
	/* Writes to memory that should be read-only. */
	CT_WRITE_RO,
	CT_WRITE_RO_AFTER_INIT,
	CT_WRITE_KERN,
	CT_ATOMIC_UNDERFLOW,
	CT_ATOMIC_OVERFLOW,
	/* Hardened-usercopy probes (see do_usercopy_* below). */
	CT_USERCOPY_HEAP_SIZE_TO,
	CT_USERCOPY_HEAP_SIZE_FROM,
	CT_USERCOPY_HEAP_FLAG_TO,
	CT_USERCOPY_HEAP_FLAG_FROM,
	CT_USERCOPY_STACK_FRAME_TO,
	CT_USERCOPY_STACK_FRAME_FROM,
	CT_USERCOPY_STACK_BEYOND,
};
124
/*
 * Crash point names, indexed by (enum cname - 1); must stay in the same
 * order as enum cname above. The strings are only ever read (strcmp in
 * lkdtm_parse_commandline(), lookup in cp_name_to_str()), so hold them
 * through const pointers: modifying a string literal is undefined
 * behavior, and the const form silences -Wwrite-strings.
 */
static const char *cp_name[] = {
	"INT_HARDWARE_ENTRY",
	"INT_HW_IRQ_EN",
	"INT_TASKLET_ENTRY",
	"FS_DEVRW",
	"MEM_SWAPOUT",
	"TIMERADD",
	"SCSI_DISPATCH_CMD",
	"IDE_CORE_CP",
	"DIRECT",
};
136
/*
 * Crash type names, indexed by (enum ctype - 1); must stay in the same
 * order as enum ctype above. Read-only (strcmp in parse_cp_type(),
 * lookup in cp_type_to_str()), so hold the literals through const
 * pointers: writing through a string literal is undefined behavior,
 * and the const form silences -Wwrite-strings.
 */
static const char *cp_type[] = {
	"PANIC",
	"BUG",
	"WARNING",
	"EXCEPTION",
	"LOOP",
	"OVERFLOW",
	"CORRUPT_STACK",
	"UNALIGNED_LOAD_STORE_WRITE",
	"OVERWRITE_ALLOCATION",
	"WRITE_AFTER_FREE",
	"READ_AFTER_FREE",
	"WRITE_BUDDY_AFTER_FREE",
	"READ_BUDDY_AFTER_FREE",
	"SOFTLOCKUP",
	"HARDLOCKUP",
	"SPINLOCKUP",
	"HUNG_TASK",
	"EXEC_DATA",
	"EXEC_STACK",
	"EXEC_KMALLOC",
	"EXEC_VMALLOC",
	"EXEC_RODATA",
	"EXEC_USERSPACE",
	"ACCESS_USERSPACE",
	"WRITE_RO",
	"WRITE_RO_AFTER_INIT",
	"WRITE_KERN",
	"ATOMIC_UNDERFLOW",
	"ATOMIC_OVERFLOW",
	"USERCOPY_HEAP_SIZE_TO",
	"USERCOPY_HEAP_SIZE_FROM",
	"USERCOPY_HEAP_FLAG_TO",
	"USERCOPY_HEAP_FLAG_FROM",
	"USERCOPY_STACK_FRAME_TO",
	"USERCOPY_STACK_FRAME_FROM",
	"USERCOPY_STACK_BEYOND",
};
175
/* The single jprobe used to hook whichever crash point was selected. */
static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

/* Module parameters (descriptions in the MODULE_PARM_DESC block below). */
static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
static int alloc_size = 1024;
/* Object size of bad_cache; set elsewhere in the file — not visible here. */
static size_t cache_size;

/* Selected crash point and action, filled in by lkdtm_parse_commandline(). */
static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
/* Hits remaining before the action fires; protected by count_lock. */
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
/* Deliberately left locked by CT_SPINLOCKUP (triggers on the second hit). */
static DEFINE_SPINLOCK(lock_me_up);

/* Writable data region the CT_EXEC_DATA test attempts to execute. */
static u8 data_area[EXEC_SIZE];
/* Cache used by the USERCOPY_HEAP_FLAG_* tests (created without
 * SLAB_USERCOPY — see do_usercopy_heap_flag()). */
static struct kmem_cache *bad_cache;

/* Pattern data for filling buffers in the usercopy stack tests. */
static const unsigned char test_text[] = "This is a test.\n";
/* Read-only targets for the CT_WRITE_RO / CT_WRITE_RO_AFTER_INIT tests. */
static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
Kees Cook9ae113c2013-10-24 09:25:57 -0700200
/*
 * Module parameters. cpoint_name/cpoint_type are read-only after load
 * (0444); the counts are writable (0644). All are validated by
 * lkdtm_parse_commandline().
 */
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
				"hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
				"crash point is to be hit to trigger action");
module_param(alloc_size, int, 0644);
MODULE_PARM_DESC(alloc_size, " Size of allocation for user copy tests "\
				"(from 1 to PAGE_SIZE)");
Ankita Garg8bb31b92006-10-02 02:17:36 -0700214
/*
 * jprobe stub: run the LKDTM crash logic on entry to the probed IRQ
 * handler, then resume via jprobe_return(). Return value is unused —
 * jprobe_return() transfers control back to the probed function.
 */
static unsigned int jp_do_irq(unsigned int irq)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
221
/*
 * jprobe stub for IRQ-event handling (INT_HW_IRQ_EN crash point —
 * registration is elsewhere in the file). Runs LKDTM, then resumes.
 */
static irqreturn_t jp_handle_irq_event(unsigned int irq,
				       struct irqaction *action)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
229
/* jprobe stub for tasklet dispatch: run LKDTM, then resume the probee. */
static void jp_tasklet_action(struct softirq_action *a)
{
	lkdtm_handler();
	jprobe_return();
}
235
/* jprobe stub for block-layer buffer I/O: run LKDTM, then resume. */
static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	lkdtm_handler();
	jprobe_return();
}
241
/* Opaque forward declaration — only a pointer to it is needed below. */
struct scan_control;

/*
 * jprobe stub for page reclaim (MEM_SWAPOUT crash point): run LKDTM,
 * then resume. Return value is unused.
 */
static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
					     struct zone *zone,
					     struct scan_control *sc)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
252
/* jprobe stub for hrtimer arming (TIMERADD): run LKDTM, then resume. */
static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
			    const enum hrtimer_mode mode)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
260
/* jprobe stub for SCSI command dispatch: run LKDTM, then resume. */
static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
267
#ifdef CONFIG_IDE
/* jprobe stub for the IDE ioctl path (IDE_CORE_CP): run LKDTM, resume. */
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
			struct block_device *bdev, unsigned int cmd,
			unsigned long arg)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
#endif
278
/*
 * Map a crash-type name (e.g. "PANIC") to its enum ctype value, or
 * CT_NONE if the name matches no entry in cp_type[]. Note the +1:
 * cp_type[i] corresponds to ctype i + 1 because CT_NONE == 0 has no
 * table entry. The @count argument is accepted but unused.
 */
static enum ctype parse_cp_type(const char *what, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	}

	return CT_NONE;
}
291
292static const char *cp_type_to_str(enum ctype type)
293{
Namhyung Kim93e2f582010-10-26 14:22:40 -0700294 if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
Simon Kagstrom0347af42010-03-05 13:42:49 -0800295 return "None";
296
297 return cp_type[type - 1];
298}
299
300static const char *cp_name_to_str(enum cname name)
301{
Namhyung Kim93e2f582010-10-26 14:22:40 -0700302 if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
Simon Kagstrom0347af42010-03-05 13:42:49 -0800303 return "INVALID";
304
305 return cp_name[name - 1];
306}
307
308
Ankita Garg8bb31b92006-10-02 02:17:36 -0700309static int lkdtm_parse_commandline(void)
310{
311 int i;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700312 unsigned long flags;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700313
Simon Kagstrom0347af42010-03-05 13:42:49 -0800314 if (cpoint_count < 1 || recur_count < 1)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700315 return -EINVAL;
316
Josh Huntaa2c96d2011-06-27 16:18:08 -0700317 spin_lock_irqsave(&count_lock, flags);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700318 count = cpoint_count;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700319 spin_unlock_irqrestore(&count_lock, flags);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700320
Simon Kagstrom0347af42010-03-05 13:42:49 -0800321 /* No special parameters */
322 if (!cpoint_type && !cpoint_name)
323 return 0;
324
325 /* Neither or both of these need to be set */
326 if (!cpoint_type || !cpoint_name)
327 return -EINVAL;
328
329 cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
Namhyung Kim93e2f582010-10-26 14:22:40 -0700330 if (cptype == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -0800331 return -EINVAL;
332
333 for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
334 if (!strcmp(cpoint_name, cp_name[i])) {
335 cpoint = i + 1;
336 return 0;
337 }
338 }
339
340 /* Could not find a valid crash point */
341 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700342}
343
Kees Cook7d196ac2013-10-24 09:25:39 -0700344static int recursive_loop(int remaining)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700345{
Kees Cook7d196ac2013-10-24 09:25:39 -0700346 char buf[REC_STACK_SIZE];
Ankita Garg8bb31b92006-10-02 02:17:36 -0700347
Kees Cook7d196ac2013-10-24 09:25:39 -0700348 /* Make sure compiler does not optimize this away. */
349 memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
350 if (!remaining)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700351 return 0;
352 else
Kees Cook7d196ac2013-10-24 09:25:39 -0700353 return recursive_loop(remaining - 1);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700354}
355
/*
 * Empty function: the source of known-good executable code for the
 * EXEC_* tests (its body is memcpy'd into the target region). Its size
 * is bounded by the distance to do_overwritten() below — do not move
 * or reorder these two functions.
 */
static void do_nothing(void)
{
	return;
}
360
/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
	/* If this message appears, the WRITE_KERN overwrite did not happen. */
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}
367
/*
 * Intentionally write 64 bytes into an 8-byte stack buffer to smash
 * this frame's canary — CT_CORRUPT_STACK expects the stack protector
 * to catch this. The overflow is the test; do not "fix" it.
 */
static noinline void corrupt_stack(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];

	memset((void *)data, 0, 64);
}
375
/*
 * Attempt to execute code at @dst, which should be a non-executable
 * region of at least EXEC_SIZE bytes. When @write is true, first copy
 * the body of do_nothing() there and flush the icache; when false,
 * @dst is assumed to already hold code (e.g. the rodata copy). A good
 * execution of the real do_nothing() is logged first for contrast;
 * the final call is expected to Oops on a correctly-protected kernel.
 */
static noinline void execute_location(void *dst, bool write)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (write) {
		memcpy(dst, do_nothing, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %p\n", func);
	func();
}
391
/*
 * Copy do_nothing() into the userspace page at @dst and try to execute
 * it from kernel context. Intentionally crosses the kernel/user memory
 * boundary; expected to Oops where the kernel is prevented from
 * executing user memory (e.g. SMEP — hardware dependent).
 */
static void execute_user_location(void *dst)
{
	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	/* A failed copy means no user page to execute; just bail. */
	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}
406
/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}
415
/*
 * Fill a 32-byte stack buffer and return its address: deliberately a
 * dangling pointer into this (dead) frame, used as the "distant stack"
 * target by do_usercopy_stack(). trick_compiler() hides the escape
 * from the compiler's return-local-address warning.
 */
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}
428
/*
 * Exercise hardened-usercopy's stack checks by copying between a user
 * page and kernel stack addresses.
 *
 * @to_user:   true tests copy_to_user(), false tests copy_from_user().
 * @bad_frame: true uses a pointer into a dead callee frame (should be
 *             rejected); false uses the very end of the thread stack.
 *
 * The "good" copy of a live local buffer must succeed; the "bad" copy
 * is expected to Oops under a hardened usercopy implementation.
 */
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee(alloc_size);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	/* Anonymous user page to copy to/from. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}
497
/*
 * Exercise hardened-usercopy's object-size checks against kmalloc()
 * objects.
 *
 * @to_user: true tests copy_to_user(), false tests copy_from_user().
 *
 * A "good" copy of exactly @size bytes is followed by a "bad" copy of
 * 2 * size bytes from the same size-byte allocation; the bad copy is
 * expected to Oops under hardened usercopy.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	/* Clamp the requested alloc_size to something kmalloc can serve. */
	size_t size = clamp_t(int, alloc_size, 1, PAGE_SIZE);

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	/* Anonymous user page to copy to/from. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr,
				   size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}
555
/*
 * Exercise the SLAB_USERCOPY whitelist: copies involving a buffer from
 * "bad_cache" (created elsewhere in the file without SLAB_USERCOPY)
 * are expected to Oops, while kmalloc() buffers (whose caches carry
 * SLAB_USERCOPY) must pass.
 *
 * @to_user: true tests copy_to_user(), false tests copy_from_user().
 */
static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}
628
Simon Kagstrom0347af42010-03-05 13:42:49 -0800629static void lkdtm_do_action(enum ctype which)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700630{
Simon Kagstrom0347af42010-03-05 13:42:49 -0800631 switch (which) {
Namhyung Kim93e2f582010-10-26 14:22:40 -0700632 case CT_PANIC:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800633 panic("dumptest");
634 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700635 case CT_BUG:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800636 BUG();
637 break;
Kees Cook65892722013-07-08 10:01:31 -0700638 case CT_WARNING:
639 WARN_ON(1);
640 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700641 case CT_EXCEPTION:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800642 *((int *) 0) = 0;
643 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700644 case CT_LOOP:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800645 for (;;)
646 ;
647 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700648 case CT_OVERFLOW:
Kees Cook7d196ac2013-10-24 09:25:39 -0700649 (void) recursive_loop(recur_count);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800650 break;
Kees Cook629c66a2013-10-24 18:05:42 -0700651 case CT_CORRUPT_STACK:
652 corrupt_stack();
Simon Kagstrom0347af42010-03-05 13:42:49 -0800653 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700654 case CT_UNALIGNED_LOAD_STORE_WRITE: {
Simon Kagstrom0347af42010-03-05 13:42:49 -0800655 static u8 data[5] __attribute__((aligned(4))) = {1, 2,
656 3, 4, 5};
657 u32 *p;
658 u32 val = 0x12345678;
659
660 p = (u32 *)(data + 1);
661 if (*p == 0)
662 val = 0x87654321;
663 *p = val;
664 break;
665 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700666 case CT_OVERWRITE_ALLOCATION: {
Simon Kagstrom0347af42010-03-05 13:42:49 -0800667 size_t len = 1020;
668 u32 *data = kmalloc(len, GFP_KERNEL);
669
670 data[1024 / sizeof(u32)] = 0x12345678;
671 kfree(data);
672 break;
673 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700674 case CT_WRITE_AFTER_FREE: {
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800675 int *base, *again;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800676 size_t len = 1024;
Laura Abbott250a8982016-02-25 16:36:43 -0800677 /*
678 * The slub allocator uses the first word to store the free
679 * pointer in some configurations. Use the middle of the
680 * allocation to avoid running into the freelist
681 */
682 size_t offset = (len / sizeof(*base)) / 2;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800683
Laura Abbott250a8982016-02-25 16:36:43 -0800684 base = kmalloc(len, GFP_KERNEL);
685 pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
Laura Abbott250a8982016-02-25 16:36:43 -0800686 pr_info("Attempting bad write to freed memory at %p\n",
687 &base[offset]);
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800688 kfree(base);
Laura Abbott250a8982016-02-25 16:36:43 -0800689 base[offset] = 0x0abcdef0;
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800690 /* Attempt to notice the overwrite. */
691 again = kmalloc(len, GFP_KERNEL);
692 kfree(again);
693 if (again != base)
694 pr_info("Hmm, didn't get the same memory range.\n");
695
Simon Kagstrom0347af42010-03-05 13:42:49 -0800696 break;
697 }
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800698 case CT_READ_AFTER_FREE: {
699 int *base, *val, saw;
700 size_t len = 1024;
701 /*
702 * The slub allocator uses the first word to store the free
703 * pointer in some configurations. Use the middle of the
704 * allocation to avoid running into the freelist
705 */
706 size_t offset = (len / sizeof(*base)) / 2;
707
708 base = kmalloc(len, GFP_KERNEL);
709 if (!base)
710 break;
711
712 val = kmalloc(len, GFP_KERNEL);
Sudip Mukherjeed2e10082016-04-05 22:41:06 +0530713 if (!val) {
714 kfree(base);
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800715 break;
Sudip Mukherjeed2e10082016-04-05 22:41:06 +0530716 }
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800717
718 *val = 0x12345678;
719 base[offset] = *val;
720 pr_info("Value in memory before free: %x\n", base[offset]);
721
722 kfree(base);
723
724 pr_info("Attempting bad read from freed memory\n");
725 saw = base[offset];
726 if (saw != *val) {
727 /* Good! Poisoning happened, so declare a win. */
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800728 pr_info("Memory correctly poisoned (%x)\n", saw);
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800729 BUG();
730 }
731 pr_info("Memory was not poisoned\n");
732
733 kfree(val);
734 break;
735 }
Laura Abbott920d4512016-02-25 16:36:44 -0800736 case CT_WRITE_BUDDY_AFTER_FREE: {
737 unsigned long p = __get_free_page(GFP_KERNEL);
738 if (!p)
739 break;
740 pr_info("Writing to the buddy page before free\n");
741 memset((void *)p, 0x3, PAGE_SIZE);
742 free_page(p);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800743 schedule();
Laura Abbott920d4512016-02-25 16:36:44 -0800744 pr_info("Attempting bad write to the buddy page after free\n");
745 memset((void *)p, 0x78, PAGE_SIZE);
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800746 /* Attempt to notice the overwrite. */
747 p = __get_free_page(GFP_KERNEL);
748 free_page(p);
749 schedule();
750
Laura Abbott920d4512016-02-25 16:36:44 -0800751 break;
752 }
753 case CT_READ_BUDDY_AFTER_FREE: {
754 unsigned long p = __get_free_page(GFP_KERNEL);
Sudip Mukherjee50fbd972016-04-05 22:41:05 +0530755 int saw, *val;
Laura Abbott920d4512016-02-25 16:36:44 -0800756 int *base;
757
758 if (!p)
759 break;
760
Sudip Mukherjee50fbd972016-04-05 22:41:05 +0530761 val = kmalloc(1024, GFP_KERNEL);
Kees Cook3d085c72016-04-06 15:53:27 -0700762 if (!val) {
763 free_page(p);
Laura Abbott920d4512016-02-25 16:36:44 -0800764 break;
Kees Cook3d085c72016-04-06 15:53:27 -0700765 }
Laura Abbott920d4512016-02-25 16:36:44 -0800766
767 base = (int *)p;
768
769 *val = 0x12345678;
770 base[0] = *val;
771 pr_info("Value in memory before free: %x\n", base[0]);
772 free_page(p);
773 pr_info("Attempting to read from freed memory\n");
774 saw = base[0];
775 if (saw != *val) {
776 /* Good! Poisoning happened, so declare a win. */
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800777 pr_info("Memory correctly poisoned (%x)\n", saw);
Laura Abbott920d4512016-02-25 16:36:44 -0800778 BUG();
779 }
780 pr_info("Buddy page was not poisoned\n");
781
782 kfree(val);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800783 break;
784 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700785 case CT_SOFTLOCKUP:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700786 preempt_disable();
787 for (;;)
788 cpu_relax();
789 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700790 case CT_HARDLOCKUP:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700791 local_irq_disable();
792 for (;;)
793 cpu_relax();
794 break;
Kees Cook274a5852013-07-08 10:01:32 -0700795 case CT_SPINLOCKUP:
796 /* Must be called twice to trigger. */
797 spin_lock(&lock_me_up);
Kees Cook51236622013-11-11 11:23:49 -0800798 /* Let sparse know we intended to exit holding the lock. */
799 __release(&lock_me_up);
Kees Cook274a5852013-07-08 10:01:32 -0700800 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700801 case CT_HUNG_TASK:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700802 set_current_state(TASK_UNINTERRUPTIBLE);
803 schedule();
804 break;
Kees Cookcc33c5372013-07-08 10:01:33 -0700805 case CT_EXEC_DATA:
Kees Cook9a49a522016-02-22 14:09:29 -0800806 execute_location(data_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700807 break;
808 case CT_EXEC_STACK: {
809 u8 stack_area[EXEC_SIZE];
Kees Cook9a49a522016-02-22 14:09:29 -0800810 execute_location(stack_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700811 break;
812 }
813 case CT_EXEC_KMALLOC: {
814 u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
Kees Cook9a49a522016-02-22 14:09:29 -0800815 execute_location(kmalloc_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700816 kfree(kmalloc_area);
817 break;
818 }
819 case CT_EXEC_VMALLOC: {
820 u32 *vmalloc_area = vmalloc(EXEC_SIZE);
Kees Cook9a49a522016-02-22 14:09:29 -0800821 execute_location(vmalloc_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700822 vfree(vmalloc_area);
823 break;
824 }
Kees Cook9a49a522016-02-22 14:09:29 -0800825 case CT_EXEC_RODATA:
826 execute_location(lkdtm_rodata_do_nothing, false);
827 break;
Kees Cook9ae113c2013-10-24 09:25:57 -0700828 case CT_EXEC_USERSPACE: {
829 unsigned long user_addr;
830
831 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
832 PROT_READ | PROT_WRITE | PROT_EXEC,
833 MAP_ANONYMOUS | MAP_PRIVATE, 0);
834 if (user_addr >= TASK_SIZE) {
835 pr_warn("Failed to allocate user memory\n");
836 return;
837 }
838 execute_user_location((void *)user_addr);
839 vm_munmap(user_addr, PAGE_SIZE);
840 break;
841 }
842 case CT_ACCESS_USERSPACE: {
Stephen Smalley2cb202c2015-10-27 16:47:53 -0400843 unsigned long user_addr, tmp = 0;
Kees Cook9ae113c2013-10-24 09:25:57 -0700844 unsigned long *ptr;
845
846 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
847 PROT_READ | PROT_WRITE | PROT_EXEC,
848 MAP_ANONYMOUS | MAP_PRIVATE, 0);
849 if (user_addr >= TASK_SIZE) {
850 pr_warn("Failed to allocate user memory\n");
851 return;
852 }
853
Stephen Smalley2cb202c2015-10-27 16:47:53 -0400854 if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
855 pr_warn("copy_to_user failed\n");
856 vm_munmap(user_addr, PAGE_SIZE);
857 return;
858 }
859
Kees Cook9ae113c2013-10-24 09:25:57 -0700860 ptr = (unsigned long *)user_addr;
Kees Cookaac416f2014-02-09 13:48:47 -0800861
862 pr_info("attempting bad read at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700863 tmp = *ptr;
864 tmp += 0xc0dec0de;
Kees Cookaac416f2014-02-09 13:48:47 -0800865
866 pr_info("attempting bad write at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700867 *ptr = tmp;
868
869 vm_munmap(user_addr, PAGE_SIZE);
870
871 break;
872 }
873 case CT_WRITE_RO: {
Kees Cook7cca0712016-02-17 14:41:16 -0800874 /* Explicitly cast away "const" for the test. */
875 unsigned long *ptr = (unsigned long *)&rodata;
Kees Cook9ae113c2013-10-24 09:25:57 -0700876
Kees Cook7cca0712016-02-17 14:41:16 -0800877 pr_info("attempting bad rodata write at %p\n", ptr);
878 *ptr ^= 0xabcd1234;
Kees Cookaac416f2014-02-09 13:48:47 -0800879
Kees Cook7cca0712016-02-17 14:41:16 -0800880 break;
881 }
882 case CT_WRITE_RO_AFTER_INIT: {
883 unsigned long *ptr = &ro_after_init;
884
885 /*
886 * Verify we were written to during init. Since an Oops
887 * is considered a "success", a failure is to just skip the
888 * real test.
889 */
890 if ((*ptr & 0xAA) != 0xAA) {
891 pr_info("%p was NOT written during init!?\n", ptr);
892 break;
893 }
894
895 pr_info("attempting bad ro_after_init write at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700896 *ptr ^= 0xabcd1234;
897
898 break;
899 }
Kees Cookdc2b9e92014-02-09 13:48:48 -0800900 case CT_WRITE_KERN: {
901 size_t size;
902 unsigned char *ptr;
903
904 size = (unsigned long)do_overwritten -
905 (unsigned long)do_nothing;
906 ptr = (unsigned char *)do_overwritten;
907
908 pr_info("attempting bad %zu byte write at %p\n", size, ptr);
909 memcpy(ptr, (unsigned char *)do_nothing, size);
910 flush_icache_range((unsigned long)ptr,
911 (unsigned long)(ptr + size));
912
913 do_overwritten();
914 break;
915 }
Kees Cookb5484522016-06-07 14:27:02 -0700916 case CT_ATOMIC_UNDERFLOW: {
David Windsor5fd9e482015-12-17 00:56:36 -0500917 atomic_t under = ATOMIC_INIT(INT_MIN);
Kees Cookb5484522016-06-07 14:27:02 -0700918
919 pr_info("attempting good atomic increment\n");
920 atomic_inc(&under);
921 atomic_dec(&under);
922
923 pr_info("attempting bad atomic underflow\n");
924 atomic_dec(&under);
925 break;
926 }
927 case CT_ATOMIC_OVERFLOW: {
David Windsor5fd9e482015-12-17 00:56:36 -0500928 atomic_t over = ATOMIC_INIT(INT_MAX);
929
Kees Cookb5484522016-06-07 14:27:02 -0700930 pr_info("attempting good atomic decrement\n");
931 atomic_dec(&over);
932 atomic_inc(&over);
933
934 pr_info("attempting bad atomic overflow\n");
David Windsor5fd9e482015-12-17 00:56:36 -0500935 atomic_inc(&over);
936
937 return;
938 }
Kees Cookaa981a62016-06-03 12:06:52 -0700939 case CT_USERCOPY_HEAP_SIZE_TO:
940 do_usercopy_heap_size(true);
941 break;
942 case CT_USERCOPY_HEAP_SIZE_FROM:
943 do_usercopy_heap_size(false);
944 break;
945 case CT_USERCOPY_HEAP_FLAG_TO:
946 do_usercopy_heap_flag(true);
947 break;
948 case CT_USERCOPY_HEAP_FLAG_FROM:
949 do_usercopy_heap_flag(false);
950 break;
951 case CT_USERCOPY_STACK_FRAME_TO:
952 do_usercopy_stack(true, true);
953 break;
954 case CT_USERCOPY_STACK_FRAME_FROM:
955 do_usercopy_stack(false, true);
956 break;
957 case CT_USERCOPY_STACK_BEYOND:
958 do_usercopy_stack(true, false);
959 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700960 case CT_NONE:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800961 default:
962 break;
963 }
964
965}
966
967static void lkdtm_handler(void)
968{
Josh Huntaa2c96d2011-06-27 16:18:08 -0700969 unsigned long flags;
Cong Wang92618182012-02-03 15:37:15 -0800970 bool do_it = false;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700971
972 spin_lock_irqsave(&count_lock, flags);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800973 count--;
Kees Cookfeac6e22014-02-09 13:48:46 -0800974 pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
975 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700976
977 if (count == 0) {
Cong Wang92618182012-02-03 15:37:15 -0800978 do_it = true;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700979 count = cpoint_count;
980 }
Josh Huntaa2c96d2011-06-27 16:18:08 -0700981 spin_unlock_irqrestore(&count_lock, flags);
Cong Wang92618182012-02-03 15:37:15 -0800982
983 if (do_it)
984 lkdtm_do_action(cptype);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700985}
986
Simon Kagstrom0347af42010-03-05 13:42:49 -0800987static int lkdtm_register_cpoint(enum cname which)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700988{
989 int ret;
990
Namhyung Kim93e2f582010-10-26 14:22:40 -0700991 cpoint = CN_INVALID;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800992 if (lkdtm.entry != NULL)
993 unregister_jprobe(&lkdtm);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700994
Simon Kagstrom0347af42010-03-05 13:42:49 -0800995 switch (which) {
Namhyung Kim93e2f582010-10-26 14:22:40 -0700996 case CN_DIRECT:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800997 lkdtm_do_action(cptype);
998 return 0;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700999 case CN_INT_HARDWARE_ENTRY:
M. Mohan Kumarf58f2fa2009-09-22 16:43:29 -07001000 lkdtm.kp.symbol_name = "do_IRQ";
Ankita Garg8bb31b92006-10-02 02:17:36 -07001001 lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
1002 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001003 case CN_INT_HW_IRQ_EN:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001004 lkdtm.kp.symbol_name = "handle_IRQ_event";
1005 lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
1006 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001007 case CN_INT_TASKLET_ENTRY:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001008 lkdtm.kp.symbol_name = "tasklet_action";
1009 lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
1010 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001011 case CN_FS_DEVRW:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001012 lkdtm.kp.symbol_name = "ll_rw_block";
1013 lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
1014 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001015 case CN_MEM_SWAPOUT:
Ankita Garg18a61e42006-11-05 23:52:07 -08001016 lkdtm.kp.symbol_name = "shrink_inactive_list";
1017 lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001018 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001019 case CN_TIMERADD:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001020 lkdtm.kp.symbol_name = "hrtimer_start";
1021 lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
1022 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001023 case CN_SCSI_DISPATCH_CMD:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001024 lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
1025 lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
1026 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001027 case CN_IDE_CORE_CP:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001028#ifdef CONFIG_IDE
1029 lkdtm.kp.symbol_name = "generic_ide_ioctl";
1030 lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
1031#else
Kees Cookfeac6e22014-02-09 13:48:46 -08001032 pr_info("Crash point not available\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001033 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001034#endif
1035 break;
1036 default:
Kees Cookfeac6e22014-02-09 13:48:46 -08001037 pr_info("Invalid Crash Point\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001038 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001039 }
1040
Simon Kagstrom0347af42010-03-05 13:42:49 -08001041 cpoint = which;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001042 if ((ret = register_jprobe(&lkdtm)) < 0) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001043 pr_info("Couldn't register jprobe\n");
Namhyung Kim93e2f582010-10-26 14:22:40 -07001044 cpoint = CN_INVALID;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001045 }
1046
Simon Kagstrom0347af42010-03-05 13:42:49 -08001047 return ret;
1048}
1049
1050static ssize_t do_register_entry(enum cname which, struct file *f,
1051 const char __user *user_buf, size_t count, loff_t *off)
1052{
1053 char *buf;
1054 int err;
1055
1056 if (count >= PAGE_SIZE)
1057 return -EINVAL;
1058
1059 buf = (char *)__get_free_page(GFP_KERNEL);
1060 if (!buf)
1061 return -ENOMEM;
1062 if (copy_from_user(buf, user_buf, count)) {
1063 free_page((unsigned long) buf);
1064 return -EFAULT;
1065 }
1066 /* NULL-terminate and remove enter */
1067 buf[count] = '\0';
1068 strim(buf);
1069
1070 cptype = parse_cp_type(buf, count);
1071 free_page((unsigned long) buf);
1072
Namhyung Kim93e2f582010-10-26 14:22:40 -07001073 if (cptype == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -08001074 return -EINVAL;
1075
1076 err = lkdtm_register_cpoint(which);
1077 if (err < 0)
1078 return err;
1079
1080 *off += count;
1081
1082 return count;
1083}
1084
1085/* Generic read callback that just prints out the available crash types */
1086static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
1087 size_t count, loff_t *off)
1088{
1089 char *buf;
1090 int i, n, out;
1091
1092 buf = (char *)__get_free_page(GFP_KERNEL);
Alan Cox086ff4b2012-07-30 14:43:24 -07001093 if (buf == NULL)
1094 return -ENOMEM;
Simon Kagstrom0347af42010-03-05 13:42:49 -08001095
1096 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
1097 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
1098 n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
1099 buf[n] = '\0';
1100
1101 out = simple_read_from_buffer(user_buf, count, off,
1102 buf, n);
1103 free_page((unsigned long) buf);
1104
1105 return out;
1106}
1107
/* No per-open state is needed; just report success. */
static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
	return 0;
}
1112
Simon Kagstrom0347af42010-03-05 13:42:49 -08001113
/* debugfs write handler: arm a crash point at hard IRQ entry (do_IRQ). */
static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}
1119
/* debugfs write handler: arm a crash point in handle_IRQ_event. */
static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}
1125
/* debugfs write handler: arm a crash point in tasklet_action. */
static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}
1131
/* debugfs write handler: arm a crash point in ll_rw_block (block I/O). */
static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}
1137
/* debugfs write handler: arm a crash point in shrink_inactive_list (reclaim). */
static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}
1143
/* debugfs write handler: arm a crash point in hrtimer_start. */
static ssize_t timeradd_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_TIMERADD, f, buf, count, off);
}
1149
/* debugfs write handler: arm a crash point in scsi_dispatch_cmd. */
static ssize_t scsi_dispatch_cmd_entry(struct file *f,
		const char __user *buf, size_t count, loff_t *off)
{
	return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}
1155
/* debugfs write handler: arm a crash point in generic_ide_ioctl (CONFIG_IDE only). */
static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}
1161
1162/* Special entry to just crash directly. Available without KPROBEs */
1163static ssize_t direct_entry(struct file *f, const char __user *user_buf,
1164 size_t count, loff_t *off)
1165{
1166 enum ctype type;
1167 char *buf;
1168
1169 if (count >= PAGE_SIZE)
1170 return -EINVAL;
1171 if (count < 1)
1172 return -EINVAL;
1173
1174 buf = (char *)__get_free_page(GFP_KERNEL);
1175 if (!buf)
1176 return -ENOMEM;
1177 if (copy_from_user(buf, user_buf, count)) {
1178 free_page((unsigned long) buf);
1179 return -EFAULT;
1180 }
1181 /* NULL-terminate and remove enter */
1182 buf[count] = '\0';
1183 strim(buf);
1184
1185 type = parse_cp_type(buf, count);
1186 free_page((unsigned long) buf);
Namhyung Kim93e2f582010-10-26 14:22:40 -07001187 if (type == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -08001188 return -EINVAL;
1189
Kees Cookfeac6e22014-02-09 13:48:46 -08001190 pr_info("Performing direct entry %s\n", cp_type_to_str(type));
Simon Kagstrom0347af42010-03-05 13:42:49 -08001191 lkdtm_do_action(type);
1192 *off += count;
1193
1194 return count;
1195}
1196
/* Pairs a debugfs file name with its file operations. */
struct crash_entry {
	const char *name;
	const struct file_operations fops;
};

/*
 * One debugfs file per crash point.  All share the same read (list of
 * crash types) and open callbacks; only the write callback differs.
 * Entry 0 ("DIRECT") is always created; the rest need CONFIG_KPROBES
 * (see lkdtm_module_init).
 */
static const struct crash_entry crash_entries[] = {
	{"DIRECT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = direct_entry} },
	{"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hardware_entry} },
	{"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hw_irq_en} },
	{"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_tasklet_entry} },
	{"FS_DEVRW", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = fs_devrw_entry} },
	{"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = mem_swapout_entry} },
	{"TIMERADD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = timeradd_entry} },
	{"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = scsi_dispatch_cmd_entry} },
	{"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = ide_core_cp_entry} },
};
1240
/* Root of the "provoke-crash" debugfs directory (NULL until module init). */
static struct dentry *lkdtm_debugfs_root;
1242
1243static int __init lkdtm_module_init(void)
1244{
1245 int ret = -EINVAL;
1246 int n_debugfs_entries = 1; /* Assume only the direct entry */
1247 int i;
1248
Kees Cook7cca0712016-02-17 14:41:16 -08001249 /* Make sure we can write to __ro_after_init values during __init */
1250 ro_after_init |= 0xAA;
1251
Kees Cookaa981a62016-06-03 12:06:52 -07001252 /* Prepare cache that lacks SLAB_USERCOPY flag. */
1253 cache_size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
1254 bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
1255 0, NULL);
1256
Simon Kagstrom0347af42010-03-05 13:42:49 -08001257 /* Register debugfs interface */
1258 lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
1259 if (!lkdtm_debugfs_root) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001260 pr_err("creating root dir failed\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001261 return -ENODEV;
1262 }
1263
1264#ifdef CONFIG_KPROBES
1265 n_debugfs_entries = ARRAY_SIZE(crash_entries);
1266#endif
1267
1268 for (i = 0; i < n_debugfs_entries; i++) {
1269 const struct crash_entry *cur = &crash_entries[i];
1270 struct dentry *de;
1271
1272 de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
1273 NULL, &cur->fops);
1274 if (de == NULL) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001275 pr_err("could not create %s\n", cur->name);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001276 goto out_err;
1277 }
1278 }
1279
1280 if (lkdtm_parse_commandline() == -EINVAL) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001281 pr_info("Invalid command\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001282 goto out_err;
1283 }
1284
Namhyung Kim93e2f582010-10-26 14:22:40 -07001285 if (cpoint != CN_INVALID && cptype != CT_NONE) {
Simon Kagstrom0347af42010-03-05 13:42:49 -08001286 ret = lkdtm_register_cpoint(cpoint);
1287 if (ret < 0) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001288 pr_info("Invalid crash point %d\n", cpoint);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001289 goto out_err;
1290 }
Kees Cookfeac6e22014-02-09 13:48:46 -08001291 pr_info("Crash point %s of type %s registered\n",
1292 cpoint_name, cpoint_type);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001293 } else {
Kees Cookfeac6e22014-02-09 13:48:46 -08001294 pr_info("No crash points registered, enable through debugfs\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001295 }
1296
1297 return 0;
1298
1299out_err:
1300 debugfs_remove_recursive(lkdtm_debugfs_root);
1301 return ret;
1302}
1303
Adrian Bunk21181162008-02-06 01:36:50 -08001304static void __exit lkdtm_module_exit(void)
Ankita Garg8bb31b92006-10-02 02:17:36 -07001305{
Simon Kagstrom0347af42010-03-05 13:42:49 -08001306 debugfs_remove_recursive(lkdtm_debugfs_root);
1307
Kees Cookaa981a62016-06-03 12:06:52 -07001308 kmem_cache_destroy(bad_cache);
1309
Simon Kagstrom0347af42010-03-05 13:42:49 -08001310 unregister_jprobe(&lkdtm);
Kees Cookfeac6e22014-02-09 13:48:46 -08001311 pr_info("Crash point unregistered\n");
Ankita Garg8bb31b92006-10-02 02:17:36 -07001312}
1313
/* Standard module entry/exit hookup and metadata. */
module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");