// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
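
/*
 * For illustration, assuming a caller with a small local buffer on the
 * current task's stack:
 *
 *	char buf[16];
 *	ret = check_stack_object(buf, sizeof(buf));
 *
 * ret is GOOD_FRAME where the architecture implements
 * arch_within_stack_frames(), and GOOD_STACK otherwise;
 * check_stack_object(buf, THREAD_SIZE) would instead return BAD_STACK,
 * since a region that long must run past the end of the stack.
 */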

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range); a sketch follows usercopy_abort()
 * below.
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
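
/*
 * A minimal sketch of adjusting a whitelist, assuming a hypothetical
 * cache of 'struct example_ctx' objects (the struct, cache name, and
 * field names are illustrative only): only the 'regs' member may be
 * copied to/from userspace.
 *
 *	struct example_ctx {
 *		spinlock_t lock;
 *		u32 regs[16];
 *		void *private;
 *	};
 *
 *	ctx_cache = kmem_cache_create_usercopy("example_ctx",
 *			sizeof(struct example_ctx), 0, SLAB_PANIC,
 *			offsetof(struct example_ctx, regs),
 *			sizeof_field(struct example_ctx, regs),
 *			NULL);
 *
 * A copy touching 'lock' or 'private' would then land in usercopy_warn()
 * or usercopy_abort() above.
 */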

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
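
/*
 * Worked example with made-up addresses: overlaps(0x1000, 0x20, 0x1010,
 * 0x2000) is true, since [0x1000,0x1020) crosses into [0x1010,0x2000);
 * overlaps(0x1000, 0x10, 0x1010, 0x2000) is false, because the half-open
 * ranges only touch at 0x1010.
 */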

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
					bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
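
/*
 * Worked example with made-up values: on a 64-bit machine,
 * ptr = 0xfffffffffffffff8 with n = 16 makes ptr + (n - 1) wrap around
 * to 7, which is < ptr, so the copy is rejected. Checking n - 1 (the
 * offset of the last byte) rather than n keeps an object that ends
 * exactly at the top of the address space from being falsely flagged.
 */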

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Allow if the range is entirely either Reserved (i.e. special or
	 * device memory) or CMA. Otherwise, reject, since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will return either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}
246
Chris von Recklinghausenb5cb15d2018-07-03 15:43:08 -0400247static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
248
Kees Cookf5509cc2016-06-07 11:05:33 -0700249/*
250 * Validates that the given object is:
251 * - not bogus address
Qian Cai7bff3c02019-01-08 15:23:04 -0800252 * - fully contained by stack (or stack frame, when available)
253 * - fully within SLAB object (or object whitelist area, when available)
Kees Cookf5509cc2016-06-07 11:05:33 -0700254 * - not in kernel text
255 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
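
/*
 * A minimal sketch of how these checks are reached (hypothetical caller,
 * not from this file): copy_{to,from}_user() funnel runtime-sized copies
 * through check_object_size(), so a driver doing
 *
 *	u8 *buf = kmalloc(64, GFP_KERNEL);
 *	size_t len;	// runtime value, e.g. user-controlled, here > 64
 *	...
 *	err = copy_to_user(ubuf, buf, len);
 *
 * lands in check_heap_object() above, which flags the over-sized
 * exposure before any bytes move. Compile-time-constant sizes are
 * checked at build time instead and skip this runtime path.
 */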

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
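
/*
 * Usage note: booting with "hardened_usercopy=off" sets enable_checks
 * to false here, and set_hardened_usercopy() below then flips the
 * bypass_usercopy_checks static branch so that __check_object_size()
 * returns immediately.
 */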

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);