/*
 * Linux Kernel Dump Test Module for testing kernel crash conditions:
 * induces system failures at predefined crashpoints and under predefined
 * operational conditions in order to evaluate the reliability of kernel
 * sanity checking and crash dumps obtained using different dumping
 * solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <ankita@in.ibm.com>
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
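
/*
 * Example usage (illustrative; Documentation/fault-injection/provoke-crashes.txt
 * is the authoritative reference): once the module is loaded, crash types can
 * be triggered at runtime through the debugfs files created below, e.g.
 *
 *      # echo PANIC > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * assuming debugfs is mounted at /sys/kernel/debug.  The available type names
 * are the entries of the cp_type[] table in this file.
 */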
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

#include "lkdtm.h"

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set.  Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
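
/*
 * Worked example (illustrative; actual values depend on the architecture and
 * config): with CONFIG_FRAME_WARN=2048 and a 16kB THREAD_SIZE, each
 * recursive_loop() frame carries REC_STACK_SIZE = 2048 / 2 = 1024 bytes, so
 * REC_NUM_DEFAULT = (16384 / 1024) * 2 = 32 recursions, enough to run well
 * past the end of the stack.
 */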

#define DEFAULT_COUNT 10
#define EXEC_SIZE 64

enum cname {
        CN_INVALID,
        CN_INT_HARDWARE_ENTRY,
        CN_INT_HW_IRQ_EN,
        CN_INT_TASKLET_ENTRY,
        CN_FS_DEVRW,
        CN_MEM_SWAPOUT,
        CN_TIMERADD,
        CN_SCSI_DISPATCH_CMD,
        CN_IDE_CORE_CP,
        CN_DIRECT,
};

enum ctype {
        CT_NONE,
        CT_PANIC,
        CT_BUG,
        CT_WARNING,
        CT_EXCEPTION,
        CT_LOOP,
        CT_OVERFLOW,
        CT_CORRUPT_STACK,
        CT_UNALIGNED_LOAD_STORE_WRITE,
        CT_OVERWRITE_ALLOCATION,
        CT_WRITE_AFTER_FREE,
        CT_READ_AFTER_FREE,
        CT_WRITE_BUDDY_AFTER_FREE,
        CT_READ_BUDDY_AFTER_FREE,
        CT_SOFTLOCKUP,
        CT_HARDLOCKUP,
        CT_SPINLOCKUP,
        CT_HUNG_TASK,
        CT_EXEC_DATA,
        CT_EXEC_STACK,
        CT_EXEC_KMALLOC,
        CT_EXEC_VMALLOC,
        CT_EXEC_RODATA,
        CT_EXEC_USERSPACE,
        CT_ACCESS_USERSPACE,
        CT_WRITE_RO,
        CT_WRITE_RO_AFTER_INIT,
        CT_WRITE_KERN,
        CT_WRAP_ATOMIC,
        CT_USERCOPY_HEAP_SIZE_TO,
        CT_USERCOPY_HEAP_SIZE_FROM,
        CT_USERCOPY_HEAP_FLAG_TO,
        CT_USERCOPY_HEAP_FLAG_FROM,
        CT_USERCOPY_STACK_FRAME_TO,
        CT_USERCOPY_STACK_FRAME_FROM,
        CT_USERCOPY_STACK_BEYOND,
};

static char* cp_name[] = {
        "INT_HARDWARE_ENTRY",
        "INT_HW_IRQ_EN",
        "INT_TASKLET_ENTRY",
        "FS_DEVRW",
        "MEM_SWAPOUT",
        "TIMERADD",
        "SCSI_DISPATCH_CMD",
        "IDE_CORE_CP",
        "DIRECT",
};

static char* cp_type[] = {
        "PANIC",
        "BUG",
        "WARNING",
        "EXCEPTION",
        "LOOP",
        "OVERFLOW",
        "CORRUPT_STACK",
        "UNALIGNED_LOAD_STORE_WRITE",
        "OVERWRITE_ALLOCATION",
        "WRITE_AFTER_FREE",
        "READ_AFTER_FREE",
        "WRITE_BUDDY_AFTER_FREE",
        "READ_BUDDY_AFTER_FREE",
        "SOFTLOCKUP",
        "HARDLOCKUP",
        "SPINLOCKUP",
        "HUNG_TASK",
        "EXEC_DATA",
        "EXEC_STACK",
        "EXEC_KMALLOC",
        "EXEC_VMALLOC",
        "EXEC_RODATA",
        "EXEC_USERSPACE",
        "ACCESS_USERSPACE",
        "WRITE_RO",
        "WRITE_RO_AFTER_INIT",
        "WRITE_KERN",
        "WRAP_ATOMIC",
        "USERCOPY_HEAP_SIZE_TO",
        "USERCOPY_HEAP_SIZE_FROM",
        "USERCOPY_HEAP_FLAG_TO",
        "USERCOPY_HEAP_FLAG_FROM",
        "USERCOPY_STACK_FRAME_TO",
        "USERCOPY_STACK_FRAME_FROM",
        "USERCOPY_STACK_BEYOND",
};

static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
static int alloc_size = 1024;
static size_t cache_size;

static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);

static u8 data_area[EXEC_SIZE];
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";
static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
                                "hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
                                "crash point is to be hit to trigger action");
module_param(alloc_size, int, 0644);
MODULE_PARM_DESC(alloc_size, " Size of allocation for user copy tests "\
                             "(from 1 to PAGE_SIZE)");

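/*
 * Example module load (illustrative values): a kprobe-based crashpoint can be
 * armed from the command line, e.g.
 *
 *      insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG \
 *              cpoint_count=10
 *
 * which takes the configured action once the crash point has been hit
 * cpoint_count times.
 */
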
static unsigned int jp_do_irq(unsigned int irq)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static irqreturn_t jp_handle_irq_event(unsigned int irq,
                                       struct irqaction *action)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static void jp_tasklet_action(struct softirq_action *a)
{
        lkdtm_handler();
        jprobe_return();
}

static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
        lkdtm_handler();
        jprobe_return();
}

struct scan_control;

static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
                                             struct zone *zone,
                                             struct scan_control *sc)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
                            const enum hrtimer_mode mode)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

#ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
                                struct block_device *bdev, unsigned int cmd,
                                unsigned long arg)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}
#endif

/* Return the crash type number or CT_NONE if the name is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
                if (!strcmp(what, cp_type[i]))
                        return i + 1;
        }

        return CT_NONE;
}

static const char *cp_type_to_str(enum ctype type)
{
        if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
                return "None";

        return cp_type[type - 1];
}

static const char *cp_name_to_str(enum cname name)
{
        if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
                return "INVALID";

        return cp_name[name - 1];
}


static int lkdtm_parse_commandline(void)
{
        int i;
        unsigned long flags;

        if (cpoint_count < 1 || recur_count < 1)
                return -EINVAL;

        spin_lock_irqsave(&count_lock, flags);
        count = cpoint_count;
        spin_unlock_irqrestore(&count_lock, flags);

        /* No special parameters */
        if (!cpoint_type && !cpoint_name)
                return 0;

        /* Neither or both of these need to be set */
        if (!cpoint_type || !cpoint_name)
                return -EINVAL;

        cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
        if (cptype == CT_NONE)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
                if (!strcmp(cpoint_name, cp_name[i])) {
                        cpoint = i + 1;
                        return 0;
                }
        }

        /* Could not find a valid crash point */
        return -EINVAL;
}

static int recursive_loop(int remaining)
{
        char buf[REC_STACK_SIZE];

        /* Make sure compiler does not optimize this away. */
        memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
        if (!remaining)
                return 0;
        else
                return recursive_loop(remaining - 1);
}

static void do_nothing(void)
{
        return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
        pr_info("do_overwritten wasn't overwritten!\n");
        return;
}

static noinline void corrupt_stack(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8];

        memset((void *)data, 0, 64);
}

static noinline void execute_location(void *dst, bool write)
{
        void (*func)(void) = dst;

        pr_info("attempting ok execution at %p\n", do_nothing);
        do_nothing();

        if (write) {
                memcpy(dst, do_nothing, EXEC_SIZE);
                flush_icache_range((unsigned long)dst,
                                   (unsigned long)dst + EXEC_SIZE);
        }
        pr_info("attempting bad execution at %p\n", func);
        func();
}

static void execute_user_location(void *dst)
{
        /* Intentionally crossing kernel/user memory boundary. */
        void (*func)(void) = dst;

        pr_info("attempting ok execution at %p\n", do_nothing);
        do_nothing();

        if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
                return;
        flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
        pr_info("attempting bad execution at %p\n", func);
        func();
}

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
        return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
        unsigned char buf[32];
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(buf); i++) {
                buf[i] = value & 0xff;
        }

        return trick_compiler(buf);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
        unsigned long user_addr;
        unsigned char good_stack[32];
        unsigned char *bad_stack;
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(good_stack); i++)
                good_stack[i] = test_text[i % sizeof(test_text)];

        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
                bad_stack = do_usercopy_stack_callee(alloc_size);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
                bad_stack -= sizeof(unsigned long);
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                /*
                 * There isn't a safe way to not be protected by usercopy
                 * if we're going to write to another thread's stack.
                 */
                if (!bad_frame)
                        goto free_user;

                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

static void do_usercopy_heap_size(bool to_user)
{
        unsigned long user_addr;
        unsigned char *one, *two;
        size_t size = clamp_t(int, alloc_size, 1, PAGE_SIZE);

        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
        if (!one || !two) {
                pr_warn("Failed to allocate kernel memory\n");
                goto free_kernel;
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_kernel;
        }

        memset(one, 'A', size);
        memset(two, 'B', size);

        if (to_user) {
                pr_info("attempting good copy_to_user of correct size\n");
                if (copy_to_user((void __user *)user_addr, one, size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of too large size\n");
                if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user of correct size\n");
                if (copy_from_user(one, (void __user *)user_addr,
                                   size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of too large size\n");
                if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
        kfree(one);
        kfree(two);
}

static void do_usercopy_heap_flag(bool to_user)
{
        unsigned long user_addr;
        unsigned char *good_buf = NULL;
        unsigned char *bad_buf = NULL;

        /* Make sure cache was prepared. */
        if (!bad_cache) {
                pr_warn("Failed to allocate kernel cache\n");
                return;
        }

        /*
         * Allocate one buffer from each cache (kmalloc will have the
         * SLAB_USERCOPY flag already, but "bad_cache" won't).
         */
        good_buf = kmalloc(cache_size, GFP_KERNEL);
        bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
        if (!good_buf || !bad_buf) {
                pr_warn("Failed to allocate buffers from caches\n");
                goto free_alloc;
        }

        /* Allocate user memory we'll poke at. */
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_alloc;
        }

        memset(good_buf, 'A', cache_size);
        memset(bad_buf, 'B', cache_size);

        if (to_user) {
                pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, good_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, bad_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
                if (copy_from_user(good_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
                if (copy_from_user(bad_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
        if (bad_buf)
                kmem_cache_free(bad_cache, bad_buf);
        kfree(good_buf);
}

static void lkdtm_do_action(enum ctype which)
{
        switch (which) {
        case CT_PANIC:
                panic("dumptest");
                break;
        case CT_BUG:
                BUG();
                break;
        case CT_WARNING:
                WARN_ON(1);
                break;
        case CT_EXCEPTION:
                *((int *) 0) = 0;
                break;
        case CT_LOOP:
                for (;;)
                        ;
                break;
        case CT_OVERFLOW:
                (void) recursive_loop(recur_count);
                break;
        case CT_CORRUPT_STACK:
                corrupt_stack();
                break;
        case CT_UNALIGNED_LOAD_STORE_WRITE: {
                static u8 data[5] __attribute__((aligned(4))) = {1, 2,
                                3, 4, 5};
                u32 *p;
                u32 val = 0x12345678;

                p = (u32 *)(data + 1);
                if (*p == 0)
                        val = 0x87654321;
                *p = val;
                break;
        }
        case CT_OVERWRITE_ALLOCATION: {
                size_t len = 1020;
                u32 *data = kmalloc(len, GFP_KERNEL);

                data[1024 / sizeof(u32)] = 0x12345678;
                kfree(data);
                break;
        }
        case CT_WRITE_AFTER_FREE: {
                int *base, *again;
                size_t len = 1024;
                /*
                 * The slub allocator uses the first word to store the free
                 * pointer in some configurations. Use the middle of the
                 * allocation to avoid running into the freelist
                 */
                size_t offset = (len / sizeof(*base)) / 2;

                base = kmalloc(len, GFP_KERNEL);
                pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
                pr_info("Attempting bad write to freed memory at %p\n",
                        &base[offset]);
                kfree(base);
                base[offset] = 0x0abcdef0;
                /* Attempt to notice the overwrite. */
                again = kmalloc(len, GFP_KERNEL);
                kfree(again);
                if (again != base)
                        pr_info("Hmm, didn't get the same memory range.\n");

                break;
        }
        case CT_READ_AFTER_FREE: {
                int *base, *val, saw;
                size_t len = 1024;
                /*
                 * The slub allocator uses the first word to store the free
                 * pointer in some configurations. Use the middle of the
                 * allocation to avoid running into the freelist
                 */
                size_t offset = (len / sizeof(*base)) / 2;

                base = kmalloc(len, GFP_KERNEL);
                if (!base)
                        break;

                val = kmalloc(len, GFP_KERNEL);
                if (!val) {
                        kfree(base);
                        break;
                }

                *val = 0x12345678;
                base[offset] = *val;
                pr_info("Value in memory before free: %x\n", base[offset]);

                kfree(base);

                pr_info("Attempting bad read from freed memory\n");
                saw = base[offset];
                if (saw != *val) {
                        /* Good! Poisoning happened, so declare a win. */
                        pr_info("Memory correctly poisoned (%x)\n", saw);
                        BUG();
                }
                pr_info("Memory was not poisoned\n");

                kfree(val);
                break;
        }
        case CT_WRITE_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);
                if (!p)
                        break;
                pr_info("Writing to the buddy page before free\n");
                memset((void *)p, 0x3, PAGE_SIZE);
                free_page(p);
                schedule();
                pr_info("Attempting bad write to the buddy page after free\n");
                memset((void *)p, 0x78, PAGE_SIZE);
                /* Attempt to notice the overwrite. */
                p = __get_free_page(GFP_KERNEL);
                free_page(p);
                schedule();

                break;
        }
        case CT_READ_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);
                int saw, *val;
                int *base;

                if (!p)
                        break;

                val = kmalloc(1024, GFP_KERNEL);
                if (!val) {
                        free_page(p);
                        break;
                }

                base = (int *)p;

                *val = 0x12345678;
                base[0] = *val;
                pr_info("Value in memory before free: %x\n", base[0]);
                free_page(p);
                pr_info("Attempting to read from freed memory\n");
                saw = base[0];
                if (saw != *val) {
                        /* Good! Poisoning happened, so declare a win. */
                        pr_info("Memory correctly poisoned (%x)\n", saw);
                        BUG();
                }
                pr_info("Buddy page was not poisoned\n");

                kfree(val);
                break;
        }
        case CT_SOFTLOCKUP:
                preempt_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_HARDLOCKUP:
                local_irq_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_SPINLOCKUP:
                /* Must be called twice to trigger. */
                spin_lock(&lock_me_up);
                /* Let sparse know we intended to exit holding the lock. */
                __release(&lock_me_up);
                break;
        case CT_HUNG_TASK:
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
                break;
        case CT_EXEC_DATA:
                execute_location(data_area, true);
                break;
        case CT_EXEC_STACK: {
                u8 stack_area[EXEC_SIZE];
                execute_location(stack_area, true);
                break;
        }
        case CT_EXEC_KMALLOC: {
                u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
                execute_location(kmalloc_area, true);
                kfree(kmalloc_area);
                break;
        }
        case CT_EXEC_VMALLOC: {
                u32 *vmalloc_area = vmalloc(EXEC_SIZE);
                execute_location(vmalloc_area, true);
                vfree(vmalloc_area);
                break;
        }
        case CT_EXEC_RODATA:
                execute_location(lkdtm_rodata_do_nothing, false);
                break;
        case CT_EXEC_USERSPACE: {
                unsigned long user_addr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }
                execute_user_location((void *)user_addr);
                vm_munmap(user_addr, PAGE_SIZE);
                break;
        }
        case CT_ACCESS_USERSPACE: {
                unsigned long user_addr, tmp = 0;
                unsigned long *ptr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }

                if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
                        pr_warn("copy_to_user failed\n");
                        vm_munmap(user_addr, PAGE_SIZE);
                        return;
                }

                ptr = (unsigned long *)user_addr;

                pr_info("attempting bad read at %p\n", ptr);
                tmp = *ptr;
                tmp += 0xc0dec0de;

                pr_info("attempting bad write at %p\n", ptr);
                *ptr = tmp;

                vm_munmap(user_addr, PAGE_SIZE);

                break;
        }
        case CT_WRITE_RO: {
                /* Explicitly cast away "const" for the test. */
                unsigned long *ptr = (unsigned long *)&rodata;

                pr_info("attempting bad rodata write at %p\n", ptr);
                *ptr ^= 0xabcd1234;

                break;
        }
        case CT_WRITE_RO_AFTER_INIT: {
                unsigned long *ptr = &ro_after_init;

                /*
                 * Verify we were written to during init. Since an Oops
                 * is considered a "success", a failure is to just skip the
                 * real test.
                 */
                if ((*ptr & 0xAA) != 0xAA) {
                        pr_info("%p was NOT written during init!?\n", ptr);
                        break;
                }

                pr_info("attempting bad ro_after_init write at %p\n", ptr);
                *ptr ^= 0xabcd1234;

                break;
        }
        case CT_WRITE_KERN: {
                size_t size;
                unsigned char *ptr;

                size = (unsigned long)do_overwritten -
                       (unsigned long)do_nothing;
                ptr = (unsigned char *)do_overwritten;

                pr_info("attempting bad %zu byte write at %p\n", size, ptr);
                memcpy(ptr, (unsigned char *)do_nothing, size);
                flush_icache_range((unsigned long)ptr,
                                   (unsigned long)(ptr + size));

                do_overwritten();
                break;
        }
        case CT_WRAP_ATOMIC: {
                atomic_t under = ATOMIC_INIT(INT_MIN);
                atomic_t over = ATOMIC_INIT(INT_MAX);

                pr_info("attempting atomic underflow\n");
                atomic_dec(&under);
                pr_info("attempting atomic overflow\n");
                atomic_inc(&over);

                return;
        }
        case CT_USERCOPY_HEAP_SIZE_TO:
                do_usercopy_heap_size(true);
                break;
        case CT_USERCOPY_HEAP_SIZE_FROM:
                do_usercopy_heap_size(false);
                break;
        case CT_USERCOPY_HEAP_FLAG_TO:
                do_usercopy_heap_flag(true);
                break;
        case CT_USERCOPY_HEAP_FLAG_FROM:
                do_usercopy_heap_flag(false);
                break;
        case CT_USERCOPY_STACK_FRAME_TO:
                do_usercopy_stack(true, true);
                break;
        case CT_USERCOPY_STACK_FRAME_FROM:
                do_usercopy_stack(false, true);
                break;
        case CT_USERCOPY_STACK_BEYOND:
                do_usercopy_stack(true, false);
                break;
        case CT_NONE:
        default:
                break;
        }

}

static void lkdtm_handler(void)
{
        unsigned long flags;
        bool do_it = false;

        spin_lock_irqsave(&count_lock, flags);
        count--;
        pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
                cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

        if (count == 0) {
                do_it = true;
                count = cpoint_count;
        }
        spin_unlock_irqrestore(&count_lock, flags);

        if (do_it)
                lkdtm_do_action(cptype);
}

static int lkdtm_register_cpoint(enum cname which)
{
        int ret;

        cpoint = CN_INVALID;
        if (lkdtm.entry != NULL)
                unregister_jprobe(&lkdtm);

        switch (which) {
        case CN_DIRECT:
                lkdtm_do_action(cptype);
                return 0;
        case CN_INT_HARDWARE_ENTRY:
                lkdtm.kp.symbol_name = "do_IRQ";
                lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
                break;
        case CN_INT_HW_IRQ_EN:
                lkdtm.kp.symbol_name = "handle_IRQ_event";
                lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
                break;
        case CN_INT_TASKLET_ENTRY:
                lkdtm.kp.symbol_name = "tasklet_action";
                lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
                break;
        case CN_FS_DEVRW:
                lkdtm.kp.symbol_name = "ll_rw_block";
                lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
                break;
        case CN_MEM_SWAPOUT:
                lkdtm.kp.symbol_name = "shrink_inactive_list";
                lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
                break;
        case CN_TIMERADD:
                lkdtm.kp.symbol_name = "hrtimer_start";
                lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
                break;
        case CN_SCSI_DISPATCH_CMD:
                lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
                lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
                break;
        case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
                lkdtm.kp.symbol_name = "generic_ide_ioctl";
                lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
                pr_info("Crash point not available\n");
                return -EINVAL;
#endif
                break;
        default:
                pr_info("Invalid Crash Point\n");
                return -EINVAL;
        }

        cpoint = which;
        if ((ret = register_jprobe(&lkdtm)) < 0) {
                pr_info("Couldn't register jprobe\n");
                cpoint = CN_INVALID;
        }

        return ret;
}

static ssize_t do_register_entry(enum cname which, struct file *f,
                const char __user *user_buf, size_t count, loff_t *off)
{
        char *buf;
        int err;

        if (count >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        if (copy_from_user(buf, user_buf, count)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        /* NULL-terminate and strip any trailing newline */
        buf[count] = '\0';
        strim(buf);

        cptype = parse_cp_type(buf, count);
        free_page((unsigned long) buf);

        if (cptype == CT_NONE)
                return -EINVAL;

        err = lkdtm_register_cpoint(which);
        if (err < 0)
                return err;

        *off += count;

        return count;
}

/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
                size_t count, loff_t *off)
{
        char *buf;
        int i, n, out;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
        for (i = 0; i < ARRAY_SIZE(cp_type); i++)
                n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
        buf[n] = '\0';

        out = simple_read_from_buffer(user_buf, count, off,
                                      buf, n);
        free_page((unsigned long) buf);

        return out;
}

static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
        return 0;
}


static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}

static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}

static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}

static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}

static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}

static ssize_t timeradd_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_TIMERADD, f, buf, count, off);
}

static ssize_t scsi_dispatch_cmd_entry(struct file *f,
                const char __user *buf, size_t count, loff_t *off)
{
        return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}

static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}

/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
                size_t count, loff_t *off)
{
        enum ctype type;
        char *buf;

        if (count >= PAGE_SIZE)
                return -EINVAL;
        if (count < 1)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        if (copy_from_user(buf, user_buf, count)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        /* NULL-terminate and strip any trailing newline */
        buf[count] = '\0';
        strim(buf);

        type = parse_cp_type(buf, count);
        free_page((unsigned long) buf);
        if (type == CT_NONE)
                return -EINVAL;

        pr_info("Performing direct entry %s\n", cp_type_to_str(type));
        lkdtm_do_action(type);
        *off += count;

        return count;
}

struct crash_entry {
        const char *name;
        const struct file_operations fops;
};

static const struct crash_entry crash_entries[] = {
        {"DIRECT", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = direct_entry} },
        {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = int_hardware_entry} },
        {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = int_hw_irq_en} },
        {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = int_tasklet_entry} },
        {"FS_DEVRW", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = fs_devrw_entry} },
        {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = mem_swapout_entry} },
        {"TIMERADD", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = timeradd_entry} },
        {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = scsi_dispatch_cmd_entry} },
        {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
                        .llseek = generic_file_llseek,
                        .open = lkdtm_debugfs_open,
                        .write = ide_core_cp_entry} },
};

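/*
 * Illustrative use of the kprobe-backed entries above (names come from the
 * tables earlier in this file): writing a crash type arms that crashpoint,
 * e.g.
 *
 *      # echo BUG > /sys/kernel/debug/provoke-crash/MEM_SWAPOUT
 *
 * fires the BUG once shrink_inactive_list() has been hit cpoint_count times.
 */
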
static struct dentry *lkdtm_debugfs_root;

static int __init lkdtm_module_init(void)
{
        int ret = -EINVAL;
        int n_debugfs_entries = 1; /* Assume only the direct entry */
        int i;

        /* Make sure we can write to __ro_after_init values during __init */
        ro_after_init |= 0xAA;

        /* Prepare cache that lacks SLAB_USERCOPY flag. */
        cache_size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
        bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
                                      0, NULL);

        /* Register debugfs interface */
        lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
        if (!lkdtm_debugfs_root) {
                pr_err("creating root dir failed\n");
                return -ENODEV;
        }

#ifdef CONFIG_KPROBES
        n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif

        for (i = 0; i < n_debugfs_entries; i++) {
                const struct crash_entry *cur = &crash_entries[i];
                struct dentry *de;

                de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
                                NULL, &cur->fops);
                if (de == NULL) {
                        pr_err("could not create %s\n", cur->name);
                        goto out_err;
                }
        }

        if (lkdtm_parse_commandline() == -EINVAL) {
                pr_info("Invalid command\n");
                goto out_err;
        }

        if (cpoint != CN_INVALID && cptype != CT_NONE) {
                ret = lkdtm_register_cpoint(cpoint);
                if (ret < 0) {
                        pr_info("Invalid crash point %d\n", cpoint);
                        goto out_err;
                }
                pr_info("Crash point %s of type %s registered\n",
                        cpoint_name, cpoint_type);
        } else {
                pr_info("No crash points registered, enable through debugfs\n");
        }

        return 0;

out_err:
        debugfs_remove_recursive(lkdtm_debugfs_root);
        return ret;
}

static void __exit lkdtm_module_exit(void)
{
        debugfs_remove_recursive(lkdtm_debugfs_root);

        kmem_cache_destroy(bad_cache);

        unregister_jprobe(&lkdtm);
        pr_info("Crash point unregistered\n");
}

module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");