/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/sections.h>

enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

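/*
 * Note (illustrative): when an architecture does not implement
 * arch_within_stack_frames(), the generic stub in
 * include/linux/thread_info.h returns 0, i.e. NOT_STACK, so the
 * "if (ret)" test above falls through and any object passing the two
 * range checks is accepted as GOOD_STACK without per-frame precision.
 */
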
static void report_usercopy(const void *ptr, unsigned long len,
			    bool to_user, const char *type)
{
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
		to_user ? "exposure" : "overwrite",
		to_user ? "from" : "to", ptr, type ? : "unknown", len);
	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		unsigned long high)
{
	unsigned long check_low = (unsigned long)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

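/*
 * Worked example with hypothetical addresses: ptr = 0x1000 and
 * n = 0x100 cover [0x1000, 0x1100). Against [low, high) =
 * [0x1100, 0x2000) the ranges merely touch (check_high == low), so
 * overlaps() returns false; against [0x10ff, 0x2000) it returns true,
 * since the last copied byte at 0x10ff lies inside the region.
 */
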
/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __va(__pa(x)) does not return x for addresses in
	 * the kernel image mapping. This can be detected and checked:
	 */
	textlow_linear = (unsigned long)__va(__pa(textlow));
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)__va(__pa(texthigh));
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}

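/*
 * For illustration: on x86_64 the kernel image is mapped both at the
 * high kernel text mapping and again through the direct map of all
 * physical memory, so __va(__pa(_stext)) yields a second, different
 * virtual address for the same text bytes; both ranges must be
 * rejected. (Exact addresses vary by configuration.)
 */
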
static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		return "<wrapped address>";

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}

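/*
 * Note: ZERO_OR_NULL_PTR() (include/linux/slab.h) matches both NULL and
 * ZERO_SIZE_PTR, the (void *)16 sentinel returned by kmalloc(0), so
 * copies involving a zero-length allocation are rejected as "<null>".
 */
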
static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page, *endpage;
	const void *end = ptr + n - 1;
	bool is_reserved, is_cma;

	/*
	 * Some architectures (arm64) return true for virt_addr_valid() on
	 * vmalloced addresses. Work around this by checking for vmalloc
	 * first.
	 */
	if (is_vmalloc_addr(ptr))
		return NULL;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Check slab allocator for flags and size. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

	/* Allow if start and end are inside the same compound page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Allow if the range is entirely Reserved (i.e. special or device
	 * memory) or entirely CMA, as verified page by page below.
	 * Otherwise, reject, since the object spans several independently
	 * allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		goto reject;

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			goto reject;
		if (is_cma && !is_migrate_cma_page(page))
			goto reject;
	}

	return NULL;

reject:
	return "<spans multiple pages>";
}

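/*
 * Note: __check_heap_object() above is supplied by the slab allocator in
 * use (mm/slab.c or mm/slub.c, under CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR);
 * it rejects copies that reach outside the usable area of a single slab
 * object.
 */
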
/*
 * Validates that the given object is:
 * - not bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for object in kernel text to avoid exposing it. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);
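
/*
 * Caller side, roughly (see check_object_size() in
 * include/linux/thread_info.h): the copy_to_user()/copy_from_user()
 * paths only call into this check for non-constant sizes, since
 * constant-size copies can be validated at compile time:
 *
 *	static inline void check_object_size(const void *ptr,
 *					     unsigned long n, bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 */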