/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
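
/*
 * Illustrative sketch, not used by this file: one way userspace might build
 * a minimal mode-2 filter with the classic-BPF macros from <linux/filter.h>
 * and the return encodings from <uapi/linux/seccomp.h>.  The choice of
 * __NR_getpid and EPERM below is purely for illustration:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = insns };
 *
 * This denies getpid() with EPERM and allows everything else.
 */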

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
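
/*
 * For example, if a task whose newest filter is B (with ancestor A) forks,
 * and the child then attaches filter C, memory holds one tree rooted at A:
 *
 *	parent: seccomp.filter -> B -> A
 *	child:  seccomp.filter -> C -> B -> A
 *
 * A and B are shared between the two tasks, but each task still walks a
 * plain singly-linked list through @prev.
 */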

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
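/* With an 8-byte struct sock_filter, this works out to 32768 instructions. */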
67
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +010068/*
Will Drewrye2cfabdf2012-04-12 16:47:57 -050069 * Endianness is explicitly ignored and left for BPF program authors to manage
70 * as per the specific architecture.
71 */
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +010072static void populate_seccomp_data(struct seccomp_data *sd)
Will Drewrye2cfabdf2012-04-12 16:47:57 -050073{
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +010074 struct task_struct *task = current;
75 struct pt_regs *regs = task_pt_regs(task);
Daniel Borkmann2eac7642014-04-14 21:02:59 +020076 unsigned long args[6];
Will Drewrye2cfabdf2012-04-12 16:47:57 -050077
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +010078 sd->nr = syscall_get_nr(task, regs);
Linus Torvalds0b747172014-04-12 12:38:53 -070079 sd->arch = syscall_get_arch();
Daniel Borkmann2eac7642014-04-14 21:02:59 +020080 syscall_get_arguments(task, regs, 0, 6, args);
81 sd->args[0] = args[0];
82 sd->args[1] = args[1];
83 sd->args[2] = args[2];
84 sd->args[3] = args[3];
85 sd->args[4] = args[4];
86 sd->args[5] = args[5];
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +010087 sd->instruction_pointer = KSTK_EIP(task);
Will Drewrye2cfabdf2012-04-12 16:47:57 -050088}
89
90/**
91 * seccomp_check_filter - verify seccomp filter code
92 * @filter: filter to verify
93 * @flen: length of filter
94 *
Alexei Starovoitov4df95ff2014-07-30 20:34:14 -070095 * Takes a previously checked filter (by bpf_check_classic) and
Will Drewrye2cfabdf2012-04-12 16:47:57 -050096 * redirects all filter code that loads struct sk_buff data
97 * and related data through seccomp_bpf_load. It also
98 * enforces length and alignment checking of those loads.
99 *
100 * Returns 0 if the rule set is legal or -EINVAL if not.
101 */
102static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
103{
104 int pc;
105 for (pc = 0; pc < flen; pc++) {
106 struct sock_filter *ftest = &filter[pc];
107 u16 code = ftest->code;
108 u32 k = ftest->k;
109
110 switch (code) {
Daniel Borkmann34805932014-05-29 10:22:50 +0200111 case BPF_LD | BPF_W | BPF_ABS:
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +0100112 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500113 /* 32-bit aligned and not out of bounds. */
114 if (k >= sizeof(struct seccomp_data) || k & 3)
115 return -EINVAL;
116 continue;
Daniel Borkmann34805932014-05-29 10:22:50 +0200117 case BPF_LD | BPF_W | BPF_LEN:
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +0100118 ftest->code = BPF_LD | BPF_IMM;
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500119 ftest->k = sizeof(struct seccomp_data);
120 continue;
Daniel Borkmann34805932014-05-29 10:22:50 +0200121 case BPF_LDX | BPF_W | BPF_LEN:
Alexei Starovoitovbd4cf0e2014-03-28 18:58:25 +0100122 ftest->code = BPF_LDX | BPF_IMM;
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500123 ftest->k = sizeof(struct seccomp_data);
124 continue;
125 /* Explicitly include allowed calls. */
Daniel Borkmann34805932014-05-29 10:22:50 +0200126 case BPF_RET | BPF_K:
127 case BPF_RET | BPF_A:
128 case BPF_ALU | BPF_ADD | BPF_K:
129 case BPF_ALU | BPF_ADD | BPF_X:
130 case BPF_ALU | BPF_SUB | BPF_K:
131 case BPF_ALU | BPF_SUB | BPF_X:
132 case BPF_ALU | BPF_MUL | BPF_K:
133 case BPF_ALU | BPF_MUL | BPF_X:
134 case BPF_ALU | BPF_DIV | BPF_K:
135 case BPF_ALU | BPF_DIV | BPF_X:
136 case BPF_ALU | BPF_AND | BPF_K:
137 case BPF_ALU | BPF_AND | BPF_X:
138 case BPF_ALU | BPF_OR | BPF_K:
139 case BPF_ALU | BPF_OR | BPF_X:
140 case BPF_ALU | BPF_XOR | BPF_K:
141 case BPF_ALU | BPF_XOR | BPF_X:
142 case BPF_ALU | BPF_LSH | BPF_K:
143 case BPF_ALU | BPF_LSH | BPF_X:
144 case BPF_ALU | BPF_RSH | BPF_K:
145 case BPF_ALU | BPF_RSH | BPF_X:
146 case BPF_ALU | BPF_NEG:
147 case BPF_LD | BPF_IMM:
148 case BPF_LDX | BPF_IMM:
149 case BPF_MISC | BPF_TAX:
150 case BPF_MISC | BPF_TXA:
151 case BPF_LD | BPF_MEM:
152 case BPF_LDX | BPF_MEM:
153 case BPF_ST:
154 case BPF_STX:
155 case BPF_JMP | BPF_JA:
156 case BPF_JMP | BPF_JEQ | BPF_K:
157 case BPF_JMP | BPF_JEQ | BPF_X:
158 case BPF_JMP | BPF_JGE | BPF_K:
159 case BPF_JMP | BPF_JGE | BPF_X:
160 case BPF_JMP | BPF_JGT | BPF_K:
161 case BPF_JMP | BPF_JGT | BPF_X:
162 case BPF_JMP | BPF_JSET | BPF_K:
163 case BPF_JMP | BPF_JSET | BPF_X:
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500164 continue;
165 default:
166 return -EINVAL;
167 }
168 }
169 return 0;
170}
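
/*
 * For instance, a classic-BPF BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 0) (a
 * 32-bit load of seccomp_data->nr) is accepted above and redirected to read
 * from struct seccomp_data, while the same load at offset 2 trips the
 * "k & 3" alignment check and rejects the whole filter with -EINVAL.
 */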

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
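	/* E.g. SECCOMP_RET_KILL (0x00000000U) takes precedence over
	 * SECCOMP_RET_ALLOW (0x7fff0000U). */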
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode,
				       unsigned long flags)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	/* Assume default seccomp processes want spec flaw mitigation. */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(task);
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
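		/*
		 * Publish the new filter; the release pairing with the
		 * lockless_dereference() in seccomp_run_filters() keeps
		 * readers from seeing a half-initialized pointer.
		 */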
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
					    flags);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	atomic_set(&sfilter->usage, 1);

	return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns a prepared seccomp_filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must hold current->sighand->siglock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads(flags);

	return 0;
}

void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch();
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_ALLOW:
		return 0;

	case SECCOMP_RET_KILL:
	default:
		audit_seccomp(this_syscall, SIGSYS, action);
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	audit_seccomp(this_syscall, 0, action);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode, 0);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode, flags);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
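
/*
 * Illustrative sketch, not part of the kernel's logic: how userspace might
 * reach the entry points above.  Assumes a sock_fprog "prog" like the one
 * sketched near the top of this file; error handling is elided.
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * or, through the older prctl interface (flags are always zero there):
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */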

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	__get_seccomp_filter(filter);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif