/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>

/* #define SECCOMP_DEBUG 1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @len: the number of instructions in the program
 * @insns: the BPF program instructions to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	unsigned short len;  /* Instruction count */
	struct sock_filter insns[];
};
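
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere in this file):
 * walking a task's filter chain from the most recently attached node via
 * @prev, the same singly-linked view that seccomp_run_filters() and
 * put_seccomp_filter() below rely on.
 */
static inline unsigned int seccomp_filter_depth(const struct task_struct *tsk)
{
	const struct seccomp_filter *f;
	unsigned int depth = 0;

	/* fork()ed siblings may share the older nodes further down the chain. */
	for (f = tsk->seccomp.filter; f; f = f->prev)
		depth++;
	return depth;
}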

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

static void seccomp_filter_log_failure(int syscall)
{
	int compat = 0;
#ifdef CONFIG_COMPAT
	compat = is_compat_task();
#endif
	pr_info("%s[%d]: %ssystem call %d blocked at 0x%lx\n",
		current->comm, task_pid_nr(current),
		(compat ? "compat " : ""),
		syscall, KSTK_EIP(current));
}

/**
 * get_u32 - returns a u32 offset into data
 * @data: an unsigned 64-bit value
 * @index: 0 or 1 to return the first or second 32-bits
 *
 * This inline exists to hide the length of unsigned long.  If a 32-bit
 * unsigned long is passed in, it will be extended and the top 32-bits will be
 * 0.  If it is a 64-bit unsigned long, then whatever data is resident will be
 * properly returned.
 *
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static inline u32 get_u32(u64 data, int index)
{
	return ((u32 *)&data)[index];
}
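
/*
 * Worked example (illustrative): on a little-endian machine, for
 * data == 0x0000000011223344ULL, get_u32(data, 0) == 0x11223344 and
 * get_u32(data, 1) == 0; a big-endian machine returns the halves in the
 * opposite order, which is why endianness is left to the filter author.
 */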

/* Helper for seccomp_bpf_load() below. */
#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
/**
 * seccomp_bpf_load - returns the requested 32 bits of seccomp_data
 * @off: offset into struct seccomp_data to load from
 *
 * seccomp_check_filter() should assure that @off is 32-bit aligned
 * and not out of bounds.  Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);
	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		int index = !!(off % sizeof(u64));
		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_check_filter should make this impossible. */
	BUG();
}
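
/*
 * Worked example (illustrative): a load of
 * offsetof(struct seccomp_data, args[1]) arrives here with arg == 1 and
 * index == 0, i.e. the low 32 bits of the second syscall argument on a
 * little-endian machine; adding sizeof(u32) to the offset selects
 * index == 1 instead.
 */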

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_S_LD_W_ABS:
			ftest->code = BPF_S_ANC_SECCOMP_LD_W;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_S_LD_W_LEN:
			ftest->code = BPF_S_LD_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_S_LDX_W_LEN:
			ftest->code = BPF_S_LDX_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_S_RET_K:
		case BPF_S_RET_A:
		case BPF_S_ALU_ADD_K:
		case BPF_S_ALU_ADD_X:
		case BPF_S_ALU_SUB_K:
		case BPF_S_ALU_SUB_X:
		case BPF_S_ALU_MUL_K:
		case BPF_S_ALU_MUL_X:
		case BPF_S_ALU_DIV_X:
		case BPF_S_ALU_AND_K:
		case BPF_S_ALU_AND_X:
		case BPF_S_ALU_OR_K:
		case BPF_S_ALU_OR_X:
		case BPF_S_ALU_LSH_K:
		case BPF_S_ALU_LSH_X:
		case BPF_S_ALU_RSH_K:
		case BPF_S_ALU_RSH_X:
		case BPF_S_ALU_NEG:
		case BPF_S_LD_IMM:
		case BPF_S_LDX_IMM:
		case BPF_S_MISC_TAX:
		case BPF_S_MISC_TXA:
		case BPF_S_ALU_DIV_K:
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
		case BPF_S_JMP_JA:
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
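
/*
 * Example (illustrative): a classic BPF program starting with
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 0)
 * would normally read the first word of an skb; after the rewrite above the
 * instruction becomes BPF_S_ANC_SECCOMP_LD_W with k == 0, so the interpreter
 * fetches the syscall number (offset 0, i.e. seccomp_data.nr) through
 * seccomp_bpf_load() instead of touching packet data.
 */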

/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	u32 ret = SECCOMP_RET_KILL;
	/*
	 * Filters are evaluated from the most recently attached onwards;
	 * the walk stops at the first filter that does not return
	 * SECCOMP_RET_ALLOW and that value becomes the result.
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		ret = sk_run_filter(NULL, f->insns);
		if (ret != SECCOMP_RET_ALLOW)
			break;
	}
	return ret;
}

/**
 * seccomp_attach_filter: Attaches a seccomp filter to current.
 * @fprog: BPF program to install
 *
 * Returns 0 on success or an errno on failure.
 */
static long seccomp_attach_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *filter;
	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
	unsigned long total_insns = fprog->len;
	long ret;

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return -EINVAL;

	for (filter = current->seccomp.filter; filter; filter = filter->prev)
		total_insns += filter->len + 4;  /* include a 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/*
	 * Installing a seccomp filter requires that the task have
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!current->no_new_privs &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return -EACCES;

	/* Allocate a new seccomp_filter */
	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
			 GFP_KERNEL|__GFP_NOWARN);
	if (!filter)
		return -ENOMEM;
	atomic_set(&filter->usage, 1);
	filter->len = fprog->len;

	/* Copy the instructions from fprog. */
	ret = -EFAULT;
	if (copy_from_user(filter->insns, fprog->filter, fp_size))
		goto fail;

	/* Check and rewrite the fprog via the skb checker */
	ret = sk_chk_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/* Check and rewrite the fprog for seccomp use */
	ret = seccomp_check_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;
	return 0;
fail:
	kfree(filter);
	return ret;
}
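
/*
 * Worked example (illustrative): with an 8-byte struct sock_filter,
 * MAX_INSNS_PER_PATH is (1 << 18) / 8 == 32768.  Attaching the Nth filter
 * of 500 instructions costs 500 + (N - 1) * (500 + 4) along the path, so
 * the 65th such filter still fits (32756) while a 66th would exceed the
 * limit and fail with -ENOMEM.
 */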

/**
 * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
long seccomp_attach_user_filter(char __user *user_filter)
{
	struct sock_fprog fprog;
	long ret = -EFAULT;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	ret = seccomp_attach_filter(&fprog);
out:
	return ret;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		kfree(freeme);
	}
}
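
/*
 * Example (illustrative): if a task attached filter C on top of a chain
 * B -> A that a sibling (forked before C was attached) still points at,
 * put_seccomp_filter() on that task frees C once its count drops to zero
 * but stops at B, whose remaining reference keeps B and A alive for the
 * sibling.
 */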
#endif /* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
	0, /* null terminated */
};
#endif

void __secure_computing(int this_syscall)
{
	int mode = current->seccomp.mode;
	int exit_sig = 0;
	int *syscall;

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			syscall = mode1_syscalls_32;
#endif
		do {
			if (*syscall == this_syscall)
				return;
		} while (*++syscall);
		exit_sig = SIGKILL;
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER:
		if (seccomp_run_filters(this_syscall) == SECCOMP_RET_ALLOW)
			return;
		seccomp_filter_log_failure(this_syscall);
		exit_sig = SIGSYS;
		break;
#endif
	default:
		BUG();
	}

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall);
	do_exit(exit_sig);
}

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * This function may be called repeatedly with a @seccomp_mode of
 * SECCOMP_MODE_FILTER to install additional filters.  Every filter
 * successfully installed will be evaluated (in reverse order) for each system
 * call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed to a
 * different mode.
 *
 * Returns 0 on success or a negative error code on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	long ret = -EINVAL;

	if (current->seccomp.mode &&
	    current->seccomp.mode != seccomp_mode)
		goto out;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		ret = 0;
#ifdef TIF_NOTSC
		disable_TSC();
#endif
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER:
		ret = seccomp_attach_user_filter(filter);
		if (ret)
			goto out;
		break;
#endif
	default:
		goto out;
	}

	current->seccomp.mode = seccomp_mode;
	set_thread_flag(TIF_SECCOMP);
out:
	return ret;
}
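
/*
 * Userspace sketch (illustrative, not kernel code): one plausible way to
 * reach prctl_set_seccomp() above with SECCOMP_MODE_FILTER, assuming the
 * BPF_* and SECCOMP_RET_* macros from the filter.h and seccomp.h headers
 * and the companion no_new_privs prctl.  The filter allows every system
 * call except __NR_uname; a production filter should also check
 * seccomp_data->arch before trusting the syscall number.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */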