blob: 3773562056da267aee91878ed8088b3c577a997e [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011#include <asm/types.h>
12#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010013#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010015#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070016#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070017#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070018#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070019#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070020#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020021#include <sched.h>
22
Mickaël Salaünd02d8982017-02-10 00:21:37 +010023#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070024#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070025
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020026#include <linux/unistd.h>
27#include <linux/filter.h>
28#include <linux/bpf_perf_event.h>
29#include <linux/bpf.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070030
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010031#include <bpf/bpf.h>
32
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020033#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020041#include "../../../include/linux/filter.h"
42
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020043#ifndef ARRAY_SIZE
44# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
45#endif
46
47#define MAX_INSNS 512
48#define MAX_FIXUPS 8
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070049#define MAX_NR_MAPS 4
Alexei Starovoitovbf508872015-10-07 22:23:23 -070050
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020051#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
52
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070053struct bpf_test {
54 const char *descr;
55 struct bpf_insn insns[MAX_INSNS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020056 int fixup_map1[MAX_FIXUPS];
57 int fixup_map2[MAX_FIXUPS];
58 int fixup_prog[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070059 int fixup_map_in_map[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070060 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070061 const char *errstr_unpriv;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070062 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070063 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070064 ACCEPT,
65 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070066 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070067 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020068 uint8_t flags;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070069};
70
Josef Bacik48461132016-09-28 10:54:32 -040071/* Note we want this to be 64 bit aligned so that the end of our array is
72 * actually the end of the structure.
73 */
74#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040075
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020076struct test_val {
77 unsigned int index;
78 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040079};
80
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070081static struct bpf_test tests[] = {
82 {
83 "add+sub+mul",
84 .insns = {
85 BPF_MOV64_IMM(BPF_REG_1, 1),
86 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
87 BPF_MOV64_IMM(BPF_REG_2, 3),
88 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
89 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
90 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
91 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
92 BPF_EXIT_INSN(),
93 },
94 .result = ACCEPT,
95 },
96 {
97 "unreachable",
98 .insns = {
99 BPF_EXIT_INSN(),
100 BPF_EXIT_INSN(),
101 },
102 .errstr = "unreachable",
103 .result = REJECT,
104 },
105 {
106 "unreachable2",
107 .insns = {
108 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
109 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
110 BPF_EXIT_INSN(),
111 },
112 .errstr = "unreachable",
113 .result = REJECT,
114 },
115 {
116 "out of range jump",
117 .insns = {
118 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
119 BPF_EXIT_INSN(),
120 },
121 .errstr = "jump out of range",
122 .result = REJECT,
123 },
124 {
125 "out of range jump2",
126 .insns = {
127 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
128 BPF_EXIT_INSN(),
129 },
130 .errstr = "jump out of range",
131 .result = REJECT,
132 },
133 {
134 "test1 ld_imm64",
135 .insns = {
136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
137 BPF_LD_IMM64(BPF_REG_0, 0),
138 BPF_LD_IMM64(BPF_REG_0, 0),
139 BPF_LD_IMM64(BPF_REG_0, 1),
140 BPF_LD_IMM64(BPF_REG_0, 1),
141 BPF_MOV64_IMM(BPF_REG_0, 2),
142 BPF_EXIT_INSN(),
143 },
144 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700145 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700146 .result = REJECT,
147 },
148 {
149 "test2 ld_imm64",
150 .insns = {
151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
152 BPF_LD_IMM64(BPF_REG_0, 0),
153 BPF_LD_IMM64(BPF_REG_0, 0),
154 BPF_LD_IMM64(BPF_REG_0, 1),
155 BPF_LD_IMM64(BPF_REG_0, 1),
156 BPF_EXIT_INSN(),
157 },
158 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700159 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700160 .result = REJECT,
161 },
162 {
163 "test3 ld_imm64",
164 .insns = {
165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
166 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
167 BPF_LD_IMM64(BPF_REG_0, 0),
168 BPF_LD_IMM64(BPF_REG_0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 1),
170 BPF_LD_IMM64(BPF_REG_0, 1),
171 BPF_EXIT_INSN(),
172 },
173 .errstr = "invalid bpf_ld_imm64 insn",
174 .result = REJECT,
175 },
176 {
177 "test4 ld_imm64",
178 .insns = {
179 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
180 BPF_EXIT_INSN(),
181 },
182 .errstr = "invalid bpf_ld_imm64 insn",
183 .result = REJECT,
184 },
185 {
186 "test5 ld_imm64",
187 .insns = {
188 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
189 },
190 .errstr = "invalid bpf_ld_imm64 insn",
191 .result = REJECT,
192 },
193 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200194 "test6 ld_imm64",
195 .insns = {
196 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
197 BPF_RAW_INSN(0, 0, 0, 0, 0),
198 BPF_EXIT_INSN(),
199 },
200 .result = ACCEPT,
201 },
202 {
203 "test7 ld_imm64",
204 .insns = {
205 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
206 BPF_RAW_INSN(0, 0, 0, 0, 1),
207 BPF_EXIT_INSN(),
208 },
209 .result = ACCEPT,
210 },
211 {
212 "test8 ld_imm64",
213 .insns = {
214 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
215 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_EXIT_INSN(),
217 },
218 .errstr = "uses reserved fields",
219 .result = REJECT,
220 },
221 {
222 "test9 ld_imm64",
223 .insns = {
224 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
225 BPF_RAW_INSN(0, 0, 0, 1, 1),
226 BPF_EXIT_INSN(),
227 },
228 .errstr = "invalid bpf_ld_imm64 insn",
229 .result = REJECT,
230 },
231 {
232 "test10 ld_imm64",
233 .insns = {
234 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
235 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
236 BPF_EXIT_INSN(),
237 },
238 .errstr = "invalid bpf_ld_imm64 insn",
239 .result = REJECT,
240 },
241 {
242 "test11 ld_imm64",
243 .insns = {
244 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
245 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
246 BPF_EXIT_INSN(),
247 },
248 .errstr = "invalid bpf_ld_imm64 insn",
249 .result = REJECT,
250 },
251 {
252 "test12 ld_imm64",
253 .insns = {
254 BPF_MOV64_IMM(BPF_REG_1, 0),
255 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
256 BPF_RAW_INSN(0, 0, 0, 0, 1),
257 BPF_EXIT_INSN(),
258 },
259 .errstr = "not pointing to valid bpf_map",
260 .result = REJECT,
261 },
262 {
263 "test13 ld_imm64",
264 .insns = {
265 BPF_MOV64_IMM(BPF_REG_1, 0),
266 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
267 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
268 BPF_EXIT_INSN(),
269 },
270 .errstr = "invalid bpf_ld_imm64 insn",
271 .result = REJECT,
272 },
273 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700274 "no bpf_exit",
275 .insns = {
276 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
277 },
278 .errstr = "jump out of range",
279 .result = REJECT,
280 },
281 {
282 "loop (back-edge)",
283 .insns = {
284 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
285 BPF_EXIT_INSN(),
286 },
287 .errstr = "back-edge",
288 .result = REJECT,
289 },
290 {
291 "loop2 (back-edge)",
292 .insns = {
293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
295 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
296 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
297 BPF_EXIT_INSN(),
298 },
299 .errstr = "back-edge",
300 .result = REJECT,
301 },
302 {
303 "conditional loop",
304 .insns = {
305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
306 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
307 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
309 BPF_EXIT_INSN(),
310 },
311 .errstr = "back-edge",
312 .result = REJECT,
313 },
314 {
315 "read uninitialized register",
316 .insns = {
317 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
318 BPF_EXIT_INSN(),
319 },
320 .errstr = "R2 !read_ok",
321 .result = REJECT,
322 },
323 {
324 "read invalid register",
325 .insns = {
326 BPF_MOV64_REG(BPF_REG_0, -1),
327 BPF_EXIT_INSN(),
328 },
329 .errstr = "R15 is invalid",
330 .result = REJECT,
331 },
332 {
333 "program doesn't init R0 before exit",
334 .insns = {
335 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
336 BPF_EXIT_INSN(),
337 },
338 .errstr = "R0 !read_ok",
339 .result = REJECT,
340 },
341 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700342 "program doesn't init R0 before exit in all branches",
343 .insns = {
344 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
345 BPF_MOV64_IMM(BPF_REG_0, 1),
346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
347 BPF_EXIT_INSN(),
348 },
349 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700350 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700351 .result = REJECT,
352 },
353 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700354 "stack out of bounds",
355 .insns = {
356 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
357 BPF_EXIT_INSN(),
358 },
359 .errstr = "invalid stack",
360 .result = REJECT,
361 },
362 {
363 "invalid call insn1",
364 .insns = {
365 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
366 BPF_EXIT_INSN(),
367 },
368 .errstr = "BPF_CALL uses reserved",
369 .result = REJECT,
370 },
371 {
372 "invalid call insn2",
373 .insns = {
374 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
375 BPF_EXIT_INSN(),
376 },
377 .errstr = "BPF_CALL uses reserved",
378 .result = REJECT,
379 },
380 {
381 "invalid function call",
382 .insns = {
383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
384 BPF_EXIT_INSN(),
385 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100386 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700387 .result = REJECT,
388 },
389 {
390 "uninitialized stack1",
391 .insns = {
392 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
394 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
396 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700397 BPF_EXIT_INSN(),
398 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200399 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700400 .errstr = "invalid indirect read from stack",
401 .result = REJECT,
402 },
403 {
404 "uninitialized stack2",
405 .insns = {
406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
408 BPF_EXIT_INSN(),
409 },
410 .errstr = "invalid read from stack",
411 .result = REJECT,
412 },
413 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200414 "invalid fp arithmetic",
415 /* If this gets ever changed, make sure JITs can deal with it. */
416 .insns = {
417 BPF_MOV64_IMM(BPF_REG_0, 0),
418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
419 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
420 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
421 BPF_EXIT_INSN(),
422 },
423 .errstr_unpriv = "R1 pointer arithmetic",
424 .result_unpriv = REJECT,
425 .errstr = "R1 invalid mem access",
426 .result = REJECT,
427 },
428 {
429 "non-invalid fp arithmetic",
430 .insns = {
431 BPF_MOV64_IMM(BPF_REG_0, 0),
432 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
433 BPF_EXIT_INSN(),
434 },
435 .result = ACCEPT,
436 },
437 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200438 "invalid argument register",
439 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
441 BPF_FUNC_get_cgroup_classid),
442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200444 BPF_EXIT_INSN(),
445 },
446 .errstr = "R1 !read_ok",
447 .result = REJECT,
448 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
449 },
450 {
451 "non-invalid argument register",
452 .insns = {
453 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
455 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200456 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200457 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
458 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200459 BPF_EXIT_INSN(),
460 },
461 .result = ACCEPT,
462 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 },
464 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700465 "check valid spill/fill",
466 .insns = {
467 /* spill R1(ctx) into stack */
468 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700469 /* fill it back into R2 */
470 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700471 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100472 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
473 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700474 BPF_EXIT_INSN(),
475 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700476 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700477 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700478 .result_unpriv = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700479 },
480 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200481 "check valid spill/fill, skb mark",
482 .insns = {
483 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
484 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
485 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
486 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
487 offsetof(struct __sk_buff, mark)),
488 BPF_EXIT_INSN(),
489 },
490 .result = ACCEPT,
491 .result_unpriv = ACCEPT,
492 },
493 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700494 "check corrupted spill/fill",
495 .insns = {
496 /* spill R1(ctx) into stack */
497 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700498 /* mess up with R1 pointer on stack */
499 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700500 /* fill back into R0 should fail */
501 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700502 BPF_EXIT_INSN(),
503 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700504 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700505 .errstr = "corrupted spill",
506 .result = REJECT,
507 },
508 {
509 "invalid src register in STX",
510 .insns = {
511 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
512 BPF_EXIT_INSN(),
513 },
514 .errstr = "R15 is invalid",
515 .result = REJECT,
516 },
517 {
518 "invalid dst register in STX",
519 .insns = {
520 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
521 BPF_EXIT_INSN(),
522 },
523 .errstr = "R14 is invalid",
524 .result = REJECT,
525 },
526 {
527 "invalid dst register in ST",
528 .insns = {
529 BPF_ST_MEM(BPF_B, 14, -1, -1),
530 BPF_EXIT_INSN(),
531 },
532 .errstr = "R14 is invalid",
533 .result = REJECT,
534 },
535 {
536 "invalid src register in LDX",
537 .insns = {
538 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
539 BPF_EXIT_INSN(),
540 },
541 .errstr = "R12 is invalid",
542 .result = REJECT,
543 },
544 {
545 "invalid dst register in LDX",
546 .insns = {
547 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
548 BPF_EXIT_INSN(),
549 },
550 .errstr = "R11 is invalid",
551 .result = REJECT,
552 },
553 {
554 "junk insn",
555 .insns = {
556 BPF_RAW_INSN(0, 0, 0, 0, 0),
557 BPF_EXIT_INSN(),
558 },
559 .errstr = "invalid BPF_LD_IMM",
560 .result = REJECT,
561 },
562 {
563 "junk insn2",
564 .insns = {
565 BPF_RAW_INSN(1, 0, 0, 0, 0),
566 BPF_EXIT_INSN(),
567 },
568 .errstr = "BPF_LDX uses reserved fields",
569 .result = REJECT,
570 },
571 {
572 "junk insn3",
573 .insns = {
574 BPF_RAW_INSN(-1, 0, 0, 0, 0),
575 BPF_EXIT_INSN(),
576 },
577 .errstr = "invalid BPF_ALU opcode f0",
578 .result = REJECT,
579 },
580 {
581 "junk insn4",
582 .insns = {
583 BPF_RAW_INSN(-1, -1, -1, -1, -1),
584 BPF_EXIT_INSN(),
585 },
586 .errstr = "invalid BPF_ALU opcode f0",
587 .result = REJECT,
588 },
589 {
590 "junk insn5",
591 .insns = {
592 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
593 BPF_EXIT_INSN(),
594 },
595 .errstr = "BPF_ALU uses reserved fields",
596 .result = REJECT,
597 },
598 {
599 "misaligned read from stack",
600 .insns = {
601 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
602 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
603 BPF_EXIT_INSN(),
604 },
605 .errstr = "misaligned access",
606 .result = REJECT,
607 },
608 {
609 "invalid map_fd for function call",
610 .insns = {
611 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
612 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
614 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
616 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700617 BPF_EXIT_INSN(),
618 },
619 .errstr = "fd 0 is not pointing to valid bpf_map",
620 .result = REJECT,
621 },
622 {
623 "don't check return value before access",
624 .insns = {
625 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
626 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
628 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200629 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
630 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700631 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
632 BPF_EXIT_INSN(),
633 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200634 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700635 .errstr = "R0 invalid mem access 'map_value_or_null'",
636 .result = REJECT,
637 },
638 {
639 "access memory with incorrect alignment",
640 .insns = {
641 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
642 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
644 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
646 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
648 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
649 BPF_EXIT_INSN(),
650 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200651 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700652 .errstr = "misaligned access",
653 .result = REJECT,
654 },
655 {
656 "sometimes access memory with incorrect alignment",
657 .insns = {
658 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
661 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
663 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
665 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
666 BPF_EXIT_INSN(),
667 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
668 BPF_EXIT_INSN(),
669 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200670 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700671 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700672 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700673 .result = REJECT,
674 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700675 {
676 "jump test 1",
677 .insns = {
678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
679 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
681 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
683 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
685 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
687 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
689 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
691 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
692 BPF_MOV64_IMM(BPF_REG_0, 0),
693 BPF_EXIT_INSN(),
694 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700695 .errstr_unpriv = "R1 pointer comparison",
696 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700697 .result = ACCEPT,
698 },
699 {
700 "jump test 2",
701 .insns = {
702 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
704 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
705 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
706 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
707 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
708 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
709 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
710 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
711 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
713 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
714 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
716 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
717 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
718 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
719 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
720 BPF_MOV64_IMM(BPF_REG_0, 0),
721 BPF_EXIT_INSN(),
722 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700723 .errstr_unpriv = "R1 pointer comparison",
724 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700725 .result = ACCEPT,
726 },
727 {
728 "jump test 3",
729 .insns = {
730 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
731 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
732 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
734 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
736 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
738 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
740 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
742 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
743 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
744 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
746 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
748 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
750 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
751 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
752 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
754 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
756 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700757 BPF_EXIT_INSN(),
758 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200759 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700760 .errstr_unpriv = "R1 pointer comparison",
761 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700762 .result = ACCEPT,
763 },
764 {
765 "jump test 4",
766 .insns = {
767 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
769 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
807 BPF_MOV64_IMM(BPF_REG_0, 0),
808 BPF_EXIT_INSN(),
809 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700810 .errstr_unpriv = "R1 pointer comparison",
811 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700812 .result = ACCEPT,
813 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700814 {
815 "jump test 5",
816 .insns = {
817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
818 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
819 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
820 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
821 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
822 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
823 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
824 BPF_MOV64_IMM(BPF_REG_0, 0),
825 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
826 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
827 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
828 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
829 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
830 BPF_MOV64_IMM(BPF_REG_0, 0),
831 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
832 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
833 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
834 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
835 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
836 BPF_MOV64_IMM(BPF_REG_0, 0),
837 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
838 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
839 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
840 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
841 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
842 BPF_MOV64_IMM(BPF_REG_0, 0),
843 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
844 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
845 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
846 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
847 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
848 BPF_MOV64_IMM(BPF_REG_0, 0),
849 BPF_EXIT_INSN(),
850 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700851 .errstr_unpriv = "R1 pointer comparison",
852 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700853 .result = ACCEPT,
854 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700855 {
856 "access skb fields ok",
857 .insns = {
858 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
859 offsetof(struct __sk_buff, len)),
860 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
861 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
862 offsetof(struct __sk_buff, mark)),
863 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
864 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
865 offsetof(struct __sk_buff, pkt_type)),
866 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
867 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
868 offsetof(struct __sk_buff, queue_mapping)),
869 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -0700870 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
871 offsetof(struct __sk_buff, protocol)),
872 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
873 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
874 offsetof(struct __sk_buff, vlan_present)),
875 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
876 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
877 offsetof(struct __sk_buff, vlan_tci)),
878 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +0200879 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
880 offsetof(struct __sk_buff, napi_id)),
881 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700882 BPF_EXIT_INSN(),
883 },
884 .result = ACCEPT,
885 },
886 {
887 "access skb fields bad1",
888 .insns = {
889 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
890 BPF_EXIT_INSN(),
891 },
892 .errstr = "invalid bpf_context access",
893 .result = REJECT,
894 },
895 {
896 "access skb fields bad2",
897 .insns = {
898 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
899 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
900 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
902 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200903 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
904 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700905 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
906 BPF_EXIT_INSN(),
907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, pkt_type)),
910 BPF_EXIT_INSN(),
911 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200912 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700913 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700914 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700915 .result = REJECT,
916 },
917 {
918 "access skb fields bad3",
919 .insns = {
920 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
921 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
922 offsetof(struct __sk_buff, pkt_type)),
923 BPF_EXIT_INSN(),
924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
927 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
929 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700930 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
931 BPF_EXIT_INSN(),
932 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
933 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
934 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200935 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700936 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700937 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700938 .result = REJECT,
939 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700940 {
941 "access skb fields bad4",
942 .insns = {
943 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
944 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
945 offsetof(struct __sk_buff, len)),
946 BPF_MOV64_IMM(BPF_REG_0, 0),
947 BPF_EXIT_INSN(),
948 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
951 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
953 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700954 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
955 BPF_EXIT_INSN(),
956 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
957 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
958 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200959 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700960 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700961 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700962 .result = REJECT,
963 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700964 {
965 "check skb->mark is not writeable by sockets",
966 .insns = {
967 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
968 offsetof(struct __sk_buff, mark)),
969 BPF_EXIT_INSN(),
970 },
971 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700972 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700973 .result = REJECT,
974 },
975 {
976 "check skb->tc_index is not writeable by sockets",
977 .insns = {
978 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
979 offsetof(struct __sk_buff, tc_index)),
980 BPF_EXIT_INSN(),
981 },
982 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700983 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700984 .result = REJECT,
985 },
986 {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100987 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700988 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100989 BPF_MOV64_IMM(BPF_REG_0, 0),
990 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
991 offsetof(struct __sk_buff, cb[0])),
992 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
993 offsetof(struct __sk_buff, cb[0]) + 1),
994 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
995 offsetof(struct __sk_buff, cb[0]) + 2),
996 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
997 offsetof(struct __sk_buff, cb[0]) + 3),
998 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
999 offsetof(struct __sk_buff, cb[1])),
1000 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1001 offsetof(struct __sk_buff, cb[1]) + 1),
1002 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1003 offsetof(struct __sk_buff, cb[1]) + 2),
1004 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1005 offsetof(struct __sk_buff, cb[1]) + 3),
1006 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1007 offsetof(struct __sk_buff, cb[2])),
1008 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1009 offsetof(struct __sk_buff, cb[2]) + 1),
1010 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1011 offsetof(struct __sk_buff, cb[2]) + 2),
1012 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1013 offsetof(struct __sk_buff, cb[2]) + 3),
1014 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1015 offsetof(struct __sk_buff, cb[3])),
1016 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1017 offsetof(struct __sk_buff, cb[3]) + 1),
1018 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1019 offsetof(struct __sk_buff, cb[3]) + 2),
1020 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1021 offsetof(struct __sk_buff, cb[3]) + 3),
1022 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1023 offsetof(struct __sk_buff, cb[4])),
1024 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1025 offsetof(struct __sk_buff, cb[4]) + 1),
1026 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1027 offsetof(struct __sk_buff, cb[4]) + 2),
1028 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1029 offsetof(struct __sk_buff, cb[4]) + 3),
1030 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1031 offsetof(struct __sk_buff, cb[0])),
1032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1033 offsetof(struct __sk_buff, cb[0]) + 1),
1034 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1035 offsetof(struct __sk_buff, cb[0]) + 2),
1036 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1037 offsetof(struct __sk_buff, cb[0]) + 3),
1038 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1039 offsetof(struct __sk_buff, cb[1])),
1040 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1041 offsetof(struct __sk_buff, cb[1]) + 1),
1042 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1043 offsetof(struct __sk_buff, cb[1]) + 2),
1044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1045 offsetof(struct __sk_buff, cb[1]) + 3),
1046 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1047 offsetof(struct __sk_buff, cb[2])),
1048 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1049 offsetof(struct __sk_buff, cb[2]) + 1),
1050 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1051 offsetof(struct __sk_buff, cb[2]) + 2),
1052 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1053 offsetof(struct __sk_buff, cb[2]) + 3),
1054 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1055 offsetof(struct __sk_buff, cb[3])),
1056 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1057 offsetof(struct __sk_buff, cb[3]) + 1),
1058 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1059 offsetof(struct __sk_buff, cb[3]) + 2),
1060 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1061 offsetof(struct __sk_buff, cb[3]) + 3),
1062 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1063 offsetof(struct __sk_buff, cb[4])),
1064 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1065 offsetof(struct __sk_buff, cb[4]) + 1),
1066 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1067 offsetof(struct __sk_buff, cb[4]) + 2),
1068 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1069 offsetof(struct __sk_buff, cb[4]) + 3),
1070 BPF_EXIT_INSN(),
1071 },
1072 .result = ACCEPT,
1073 },
1074 {
1075 "check cb access: byte, oob 1",
1076 .insns = {
1077 BPF_MOV64_IMM(BPF_REG_0, 0),
1078 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1079 offsetof(struct __sk_buff, cb[4]) + 4),
1080 BPF_EXIT_INSN(),
1081 },
1082 .errstr = "invalid bpf_context access",
1083 .result = REJECT,
1084 },
1085 {
1086 "check cb access: byte, oob 2",
1087 .insns = {
1088 BPF_MOV64_IMM(BPF_REG_0, 0),
1089 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1090 offsetof(struct __sk_buff, cb[0]) - 1),
1091 BPF_EXIT_INSN(),
1092 },
1093 .errstr = "invalid bpf_context access",
1094 .result = REJECT,
1095 },
1096 {
1097 "check cb access: byte, oob 3",
1098 .insns = {
1099 BPF_MOV64_IMM(BPF_REG_0, 0),
1100 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1101 offsetof(struct __sk_buff, cb[4]) + 4),
1102 BPF_EXIT_INSN(),
1103 },
1104 .errstr = "invalid bpf_context access",
1105 .result = REJECT,
1106 },
1107 {
1108 "check cb access: byte, oob 4",
1109 .insns = {
1110 BPF_MOV64_IMM(BPF_REG_0, 0),
1111 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1112 offsetof(struct __sk_buff, cb[0]) - 1),
1113 BPF_EXIT_INSN(),
1114 },
1115 .errstr = "invalid bpf_context access",
1116 .result = REJECT,
1117 },
1118 {
1119 "check cb access: byte, wrong type",
1120 .insns = {
1121 BPF_MOV64_IMM(BPF_REG_0, 0),
1122 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001123 offsetof(struct __sk_buff, cb[0])),
1124 BPF_EXIT_INSN(),
1125 },
1126 .errstr = "invalid bpf_context access",
1127 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001128 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1129 },
1130 {
1131 "check cb access: half",
1132 .insns = {
1133 BPF_MOV64_IMM(BPF_REG_0, 0),
1134 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1135 offsetof(struct __sk_buff, cb[0])),
1136 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1137 offsetof(struct __sk_buff, cb[0]) + 2),
1138 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1139 offsetof(struct __sk_buff, cb[1])),
1140 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1141 offsetof(struct __sk_buff, cb[1]) + 2),
1142 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1143 offsetof(struct __sk_buff, cb[2])),
1144 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1145 offsetof(struct __sk_buff, cb[2]) + 2),
1146 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1147 offsetof(struct __sk_buff, cb[3])),
1148 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1149 offsetof(struct __sk_buff, cb[3]) + 2),
1150 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1151 offsetof(struct __sk_buff, cb[4])),
1152 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1153 offsetof(struct __sk_buff, cb[4]) + 2),
1154 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1155 offsetof(struct __sk_buff, cb[0])),
1156 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1157 offsetof(struct __sk_buff, cb[0]) + 2),
1158 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1159 offsetof(struct __sk_buff, cb[1])),
1160 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1161 offsetof(struct __sk_buff, cb[1]) + 2),
1162 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1163 offsetof(struct __sk_buff, cb[2])),
1164 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1165 offsetof(struct __sk_buff, cb[2]) + 2),
1166 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1167 offsetof(struct __sk_buff, cb[3])),
1168 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1169 offsetof(struct __sk_buff, cb[3]) + 2),
1170 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1171 offsetof(struct __sk_buff, cb[4])),
1172 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1173 offsetof(struct __sk_buff, cb[4]) + 2),
1174 BPF_EXIT_INSN(),
1175 },
1176 .result = ACCEPT,
1177 },
1178 {
1179 "check cb access: half, unaligned",
1180 .insns = {
1181 BPF_MOV64_IMM(BPF_REG_0, 0),
1182 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1183 offsetof(struct __sk_buff, cb[0]) + 1),
1184 BPF_EXIT_INSN(),
1185 },
1186 .errstr = "misaligned access",
1187 .result = REJECT,
1188 },
1189 {
1190 "check cb access: half, oob 1",
1191 .insns = {
1192 BPF_MOV64_IMM(BPF_REG_0, 0),
1193 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1194 offsetof(struct __sk_buff, cb[4]) + 4),
1195 BPF_EXIT_INSN(),
1196 },
1197 .errstr = "invalid bpf_context access",
1198 .result = REJECT,
1199 },
1200 {
1201 "check cb access: half, oob 2",
1202 .insns = {
1203 BPF_MOV64_IMM(BPF_REG_0, 0),
1204 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1205 offsetof(struct __sk_buff, cb[0]) - 2),
1206 BPF_EXIT_INSN(),
1207 },
1208 .errstr = "invalid bpf_context access",
1209 .result = REJECT,
1210 },
1211 {
1212 "check cb access: half, oob 3",
1213 .insns = {
1214 BPF_MOV64_IMM(BPF_REG_0, 0),
1215 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1216 offsetof(struct __sk_buff, cb[4]) + 4),
1217 BPF_EXIT_INSN(),
1218 },
1219 .errstr = "invalid bpf_context access",
1220 .result = REJECT,
1221 },
1222 {
1223 "check cb access: half, oob 4",
1224 .insns = {
1225 BPF_MOV64_IMM(BPF_REG_0, 0),
1226 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1227 offsetof(struct __sk_buff, cb[0]) - 2),
1228 BPF_EXIT_INSN(),
1229 },
1230 .errstr = "invalid bpf_context access",
1231 .result = REJECT,
1232 },
1233 {
1234 "check cb access: half, wrong type",
1235 .insns = {
1236 BPF_MOV64_IMM(BPF_REG_0, 0),
1237 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1238 offsetof(struct __sk_buff, cb[0])),
1239 BPF_EXIT_INSN(),
1240 },
1241 .errstr = "invalid bpf_context access",
1242 .result = REJECT,
1243 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1244 },
1245 {
1246 "check cb access: word",
1247 .insns = {
1248 BPF_MOV64_IMM(BPF_REG_0, 0),
1249 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1250 offsetof(struct __sk_buff, cb[0])),
1251 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1252 offsetof(struct __sk_buff, cb[1])),
1253 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1254 offsetof(struct __sk_buff, cb[2])),
1255 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1256 offsetof(struct __sk_buff, cb[3])),
1257 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1258 offsetof(struct __sk_buff, cb[4])),
1259 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1260 offsetof(struct __sk_buff, cb[0])),
1261 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1262 offsetof(struct __sk_buff, cb[1])),
1263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1264 offsetof(struct __sk_buff, cb[2])),
1265 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1266 offsetof(struct __sk_buff, cb[3])),
1267 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1268 offsetof(struct __sk_buff, cb[4])),
1269 BPF_EXIT_INSN(),
1270 },
1271 .result = ACCEPT,
1272 },
1273 {
1274 "check cb access: word, unaligned 1",
1275 .insns = {
1276 BPF_MOV64_IMM(BPF_REG_0, 0),
1277 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1278 offsetof(struct __sk_buff, cb[0]) + 2),
1279 BPF_EXIT_INSN(),
1280 },
1281 .errstr = "misaligned access",
1282 .result = REJECT,
1283 },
1284 {
1285 "check cb access: word, unaligned 2",
1286 .insns = {
1287 BPF_MOV64_IMM(BPF_REG_0, 0),
1288 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1289 offsetof(struct __sk_buff, cb[4]) + 1),
1290 BPF_EXIT_INSN(),
1291 },
1292 .errstr = "misaligned access",
1293 .result = REJECT,
1294 },
1295 {
1296 "check cb access: word, unaligned 3",
1297 .insns = {
1298 BPF_MOV64_IMM(BPF_REG_0, 0),
1299 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1300 offsetof(struct __sk_buff, cb[4]) + 2),
1301 BPF_EXIT_INSN(),
1302 },
1303 .errstr = "misaligned access",
1304 .result = REJECT,
1305 },
1306 {
1307 "check cb access: word, unaligned 4",
1308 .insns = {
1309 BPF_MOV64_IMM(BPF_REG_0, 0),
1310 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1311 offsetof(struct __sk_buff, cb[4]) + 3),
1312 BPF_EXIT_INSN(),
1313 },
1314 .errstr = "misaligned access",
1315 .result = REJECT,
1316 },
1317 {
1318 "check cb access: double",
1319 .insns = {
1320 BPF_MOV64_IMM(BPF_REG_0, 0),
1321 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1322 offsetof(struct __sk_buff, cb[0])),
1323 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1324 offsetof(struct __sk_buff, cb[2])),
1325 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1326 offsetof(struct __sk_buff, cb[0])),
1327 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1328 offsetof(struct __sk_buff, cb[2])),
1329 BPF_EXIT_INSN(),
1330 },
1331 .result = ACCEPT,
1332 },
1333 {
1334 "check cb access: double, unaligned 1",
1335 .insns = {
1336 BPF_MOV64_IMM(BPF_REG_0, 0),
1337 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1338 offsetof(struct __sk_buff, cb[1])),
1339 BPF_EXIT_INSN(),
1340 },
1341 .errstr = "misaligned access",
1342 .result = REJECT,
1343 },
1344 {
1345 "check cb access: double, unaligned 2",
1346 .insns = {
1347 BPF_MOV64_IMM(BPF_REG_0, 0),
1348 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1349 offsetof(struct __sk_buff, cb[3])),
1350 BPF_EXIT_INSN(),
1351 },
1352 .errstr = "misaligned access",
1353 .result = REJECT,
1354 },
1355 {
1356 "check cb access: double, oob 1",
1357 .insns = {
1358 BPF_MOV64_IMM(BPF_REG_0, 0),
1359 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1360 offsetof(struct __sk_buff, cb[4])),
1361 BPF_EXIT_INSN(),
1362 },
1363 .errstr = "invalid bpf_context access",
1364 .result = REJECT,
1365 },
1366 {
1367 "check cb access: double, oob 2",
1368 .insns = {
1369 BPF_MOV64_IMM(BPF_REG_0, 0),
1370 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1371 offsetof(struct __sk_buff, cb[4]) + 8),
1372 BPF_EXIT_INSN(),
1373 },
1374 .errstr = "invalid bpf_context access",
1375 .result = REJECT,
1376 },
1377 {
1378 "check cb access: double, oob 3",
1379 .insns = {
1380 BPF_MOV64_IMM(BPF_REG_0, 0),
1381 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1382 offsetof(struct __sk_buff, cb[0]) - 8),
1383 BPF_EXIT_INSN(),
1384 },
1385 .errstr = "invalid bpf_context access",
1386 .result = REJECT,
1387 },
1388 {
1389 "check cb access: double, oob 4",
1390 .insns = {
1391 BPF_MOV64_IMM(BPF_REG_0, 0),
1392 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1393 offsetof(struct __sk_buff, cb[4])),
1394 BPF_EXIT_INSN(),
1395 },
1396 .errstr = "invalid bpf_context access",
1397 .result = REJECT,
1398 },
1399 {
1400 "check cb access: double, oob 5",
1401 .insns = {
1402 BPF_MOV64_IMM(BPF_REG_0, 0),
1403 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1404 offsetof(struct __sk_buff, cb[4]) + 8),
1405 BPF_EXIT_INSN(),
1406 },
1407 .errstr = "invalid bpf_context access",
1408 .result = REJECT,
1409 },
1410 {
1411 "check cb access: double, oob 6",
1412 .insns = {
1413 BPF_MOV64_IMM(BPF_REG_0, 0),
1414 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1415 offsetof(struct __sk_buff, cb[0]) - 8),
1416 BPF_EXIT_INSN(),
1417 },
1418 .errstr = "invalid bpf_context access",
1419 .result = REJECT,
1420 },
1421 {
1422 "check cb access: double, wrong type",
1423 .insns = {
1424 BPF_MOV64_IMM(BPF_REG_0, 0),
1425 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1426 offsetof(struct __sk_buff, cb[0])),
1427 BPF_EXIT_INSN(),
1428 },
1429 .errstr = "invalid bpf_context access",
1430 .result = REJECT,
1431 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001432 },
1433 {
1434 "check out of range skb->cb access",
1435 .insns = {
1436 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001437 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001438 BPF_EXIT_INSN(),
1439 },
1440 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001441 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001442 .result = REJECT,
1443 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1444 },
1445 {
1446 "write skb fields from socket prog",
1447 .insns = {
1448 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1449 offsetof(struct __sk_buff, cb[4])),
1450 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1451 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1452 offsetof(struct __sk_buff, mark)),
1453 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 offsetof(struct __sk_buff, tc_index)),
1455 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1456 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1457 offsetof(struct __sk_buff, cb[0])),
1458 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1459 offsetof(struct __sk_buff, cb[2])),
1460 BPF_EXIT_INSN(),
1461 },
1462 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001463 .errstr_unpriv = "R1 leaks addr",
1464 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001465 },
1466 {
1467 "write skb fields from tc_cls_act prog",
1468 .insns = {
1469 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1470 offsetof(struct __sk_buff, cb[0])),
1471 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1472 offsetof(struct __sk_buff, mark)),
1473 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 offsetof(struct __sk_buff, tc_index)),
1475 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1476 offsetof(struct __sk_buff, tc_index)),
1477 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1478 offsetof(struct __sk_buff, cb[3])),
1479 BPF_EXIT_INSN(),
1480 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001481 .errstr_unpriv = "",
1482 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001483 .result = ACCEPT,
1484 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1485 },
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001486 {
1487 "PTR_TO_STACK store/load",
1488 .insns = {
1489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1491 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1492 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1493 BPF_EXIT_INSN(),
1494 },
1495 .result = ACCEPT,
1496 },
1497 {
1498 "PTR_TO_STACK store/load - bad alignment on off",
1499 .insns = {
1500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1502 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1504 BPF_EXIT_INSN(),
1505 },
1506 .result = REJECT,
1507 .errstr = "misaligned access off -6 size 8",
1508 },
1509 {
1510 "PTR_TO_STACK store/load - bad alignment on reg",
1511 .insns = {
1512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1514 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1515 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1516 BPF_EXIT_INSN(),
1517 },
1518 .result = REJECT,
1519 .errstr = "misaligned access off -2 size 8",
1520 },
1521 {
1522 "PTR_TO_STACK store/load - out of bounds low",
1523 .insns = {
1524 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1526 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1527 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1528 BPF_EXIT_INSN(),
1529 },
1530 .result = REJECT,
1531 .errstr = "invalid stack off=-79992 size=8",
1532 },
1533 {
1534 "PTR_TO_STACK store/load - out of bounds high",
1535 .insns = {
1536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1538 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1539 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1540 BPF_EXIT_INSN(),
1541 },
1542 .result = REJECT,
1543 .errstr = "invalid stack off=0 size=8",
1544 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001545 {
1546 "unpriv: return pointer",
1547 .insns = {
1548 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1549 BPF_EXIT_INSN(),
1550 },
1551 .result = ACCEPT,
1552 .result_unpriv = REJECT,
1553 .errstr_unpriv = "R0 leaks addr",
1554 },
1555 {
1556 "unpriv: add const to pointer",
1557 .insns = {
1558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1559 BPF_MOV64_IMM(BPF_REG_0, 0),
1560 BPF_EXIT_INSN(),
1561 },
1562 .result = ACCEPT,
1563 .result_unpriv = REJECT,
1564 .errstr_unpriv = "R1 pointer arithmetic",
1565 },
1566 {
1567 "unpriv: add pointer to pointer",
1568 .insns = {
1569 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1570 BPF_MOV64_IMM(BPF_REG_0, 0),
1571 BPF_EXIT_INSN(),
1572 },
1573 .result = ACCEPT,
1574 .result_unpriv = REJECT,
1575 .errstr_unpriv = "R1 pointer arithmetic",
1576 },
1577 {
1578 "unpriv: neg pointer",
1579 .insns = {
1580 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1581 BPF_MOV64_IMM(BPF_REG_0, 0),
1582 BPF_EXIT_INSN(),
1583 },
1584 .result = ACCEPT,
1585 .result_unpriv = REJECT,
1586 .errstr_unpriv = "R1 pointer arithmetic",
1587 },
1588 {
1589 "unpriv: cmp pointer with const",
1590 .insns = {
1591 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1592 BPF_MOV64_IMM(BPF_REG_0, 0),
1593 BPF_EXIT_INSN(),
1594 },
1595 .result = ACCEPT,
1596 .result_unpriv = REJECT,
1597 .errstr_unpriv = "R1 pointer comparison",
1598 },
1599 {
1600 "unpriv: cmp pointer with pointer",
1601 .insns = {
1602 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1603 BPF_MOV64_IMM(BPF_REG_0, 0),
1604 BPF_EXIT_INSN(),
1605 },
1606 .result = ACCEPT,
1607 .result_unpriv = REJECT,
1608 .errstr_unpriv = "R10 pointer comparison",
1609 },
1610 {
1611 "unpriv: check that printk is disallowed",
1612 .insns = {
1613 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1616 BPF_MOV64_IMM(BPF_REG_2, 8),
1617 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1619 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001620 BPF_MOV64_IMM(BPF_REG_0, 0),
1621 BPF_EXIT_INSN(),
1622 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001623 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001624 .result_unpriv = REJECT,
1625 .result = ACCEPT,
1626 },
1627 {
1628 "unpriv: pass pointer to helper function",
1629 .insns = {
1630 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1633 BPF_LD_MAP_FD(BPF_REG_1, 0),
1634 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1635 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1637 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001638 BPF_MOV64_IMM(BPF_REG_0, 0),
1639 BPF_EXIT_INSN(),
1640 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001641 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001642 .errstr_unpriv = "R4 leaks addr",
1643 .result_unpriv = REJECT,
1644 .result = ACCEPT,
1645 },
1646 {
1647 "unpriv: indirectly pass pointer on stack to helper function",
1648 .insns = {
1649 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1652 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1654 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001655 BPF_MOV64_IMM(BPF_REG_0, 0),
1656 BPF_EXIT_INSN(),
1657 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001658 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001659 .errstr = "invalid indirect read from stack off -8+0 size 8",
1660 .result = REJECT,
1661 },
1662 {
1663 "unpriv: mangle pointer on stack 1",
1664 .insns = {
1665 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1666 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1667 BPF_MOV64_IMM(BPF_REG_0, 0),
1668 BPF_EXIT_INSN(),
1669 },
1670 .errstr_unpriv = "attempt to corrupt spilled",
1671 .result_unpriv = REJECT,
1672 .result = ACCEPT,
1673 },
1674 {
1675 "unpriv: mangle pointer on stack 2",
1676 .insns = {
1677 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1678 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1679 BPF_MOV64_IMM(BPF_REG_0, 0),
1680 BPF_EXIT_INSN(),
1681 },
1682 .errstr_unpriv = "attempt to corrupt spilled",
1683 .result_unpriv = REJECT,
1684 .result = ACCEPT,
1685 },
1686 {
1687 "unpriv: read pointer from stack in small chunks",
1688 .insns = {
1689 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1690 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1691 BPF_MOV64_IMM(BPF_REG_0, 0),
1692 BPF_EXIT_INSN(),
1693 },
1694 .errstr = "invalid size",
1695 .result = REJECT,
1696 },
1697 {
1698 "unpriv: write pointer into ctx",
1699 .insns = {
1700 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1701 BPF_MOV64_IMM(BPF_REG_0, 0),
1702 BPF_EXIT_INSN(),
1703 },
1704 .errstr_unpriv = "R1 leaks addr",
1705 .result_unpriv = REJECT,
1706 .errstr = "invalid bpf_context access",
1707 .result = REJECT,
1708 },
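	/* Spill/fill tests: a pointer written to the stack ("spill") and
	 * loaded back ("fill") keeps its tracked type, so a filled ctx
	 * pointer can still be handed to ctx-expecting helpers, while
	 * slots that get clobbered or reused for a different pointer
	 * type must be rejected, as the tests below check.
	 */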
1709 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001710 "unpriv: spill/fill of ctx",
1711 .insns = {
1712 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1714 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1715 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1716 BPF_MOV64_IMM(BPF_REG_0, 0),
1717 BPF_EXIT_INSN(),
1718 },
1719 .result = ACCEPT,
1720 },
1721 {
1722 "unpriv: spill/fill of ctx 2",
1723 .insns = {
1724 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1726 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1727 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1729 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001730 BPF_EXIT_INSN(),
1731 },
1732 .result = ACCEPT,
1733 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1734 },
1735 {
1736 "unpriv: spill/fill of ctx 3",
1737 .insns = {
1738 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1740 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1741 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1742 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1744 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001745 BPF_EXIT_INSN(),
1746 },
1747 .result = REJECT,
1748 .errstr = "R1 type=fp expected=ctx",
1749 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1750 },
1751 {
1752 "unpriv: spill/fill of ctx 4",
1753 .insns = {
1754 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1756 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1757 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001758 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1759 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001760 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1762 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001763 BPF_EXIT_INSN(),
1764 },
1765 .result = REJECT,
1766 .errstr = "R1 type=inv expected=ctx",
1767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1768 },
1769 {
1770 "unpriv: spill/fill of different pointers stx",
1771 .insns = {
1772 BPF_MOV64_IMM(BPF_REG_3, 42),
1773 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1776 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1778 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1779 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1780 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1781 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1782 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1783 offsetof(struct __sk_buff, mark)),
1784 BPF_MOV64_IMM(BPF_REG_0, 0),
1785 BPF_EXIT_INSN(),
1786 },
1787 .result = REJECT,
1788 .errstr = "same insn cannot be used with different pointers",
1789 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1790 },
1791 {
1792 "unpriv: spill/fill of different pointers ldx",
1793 .insns = {
1794 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1799 -(__s32)offsetof(struct bpf_perf_event_data,
1800 sample_period) - 8),
1801 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1802 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1803 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1804 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1805 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1806 offsetof(struct bpf_perf_event_data,
1807 sample_period)),
1808 BPF_MOV64_IMM(BPF_REG_0, 0),
1809 BPF_EXIT_INSN(),
1810 },
1811 .result = REJECT,
1812 .errstr = "same insn cannot be used with different pointers",
1813 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1814 },
1815 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001816 "unpriv: write pointer into map elem value",
1817 .insns = {
1818 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1819 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1821 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1823 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1825 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1826 BPF_EXIT_INSN(),
1827 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001828 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001829 .errstr_unpriv = "R0 leaks addr",
1830 .result_unpriv = REJECT,
1831 .result = ACCEPT,
1832 },
1833 {
1834 "unpriv: partial copy of pointer",
1835 .insns = {
1836 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1837 BPF_MOV64_IMM(BPF_REG_0, 0),
1838 BPF_EXIT_INSN(),
1839 },
1840 .errstr_unpriv = "R10 partial copy",
1841 .result_unpriv = REJECT,
1842 .result = ACCEPT,
1843 },
1844 {
1845 "unpriv: pass pointer to tail_call",
1846 .insns = {
1847 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1848 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1850 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001851 BPF_MOV64_IMM(BPF_REG_0, 0),
1852 BPF_EXIT_INSN(),
1853 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001854 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001855 .errstr_unpriv = "R3 leaks addr into helper",
1856 .result_unpriv = REJECT,
1857 .result = ACCEPT,
1858 },
1859 {
1860 "unpriv: cmp map pointer with zero",
1861 .insns = {
1862 BPF_MOV64_IMM(BPF_REG_1, 0),
1863 BPF_LD_MAP_FD(BPF_REG_1, 0),
1864 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1865 BPF_MOV64_IMM(BPF_REG_0, 0),
1866 BPF_EXIT_INSN(),
1867 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001868 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001869 .errstr_unpriv = "R1 pointer comparison",
1870 .result_unpriv = REJECT,
1871 .result = ACCEPT,
1872 },
1873 {
1874 "unpriv: write into frame pointer",
1875 .insns = {
1876 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1877 BPF_MOV64_IMM(BPF_REG_0, 0),
1878 BPF_EXIT_INSN(),
1879 },
1880 .errstr = "frame pointer is read only",
1881 .result = REJECT,
1882 },
1883 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001884 "unpriv: spill/fill frame pointer",
1885 .insns = {
1886 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1888 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1889 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1890 BPF_MOV64_IMM(BPF_REG_0, 0),
1891 BPF_EXIT_INSN(),
1892 },
1893 .errstr = "frame pointer is read only",
1894 .result = REJECT,
1895 },
1896 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001897 "unpriv: cmp of frame pointer",
1898 .insns = {
1899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1900 BPF_MOV64_IMM(BPF_REG_0, 0),
1901 BPF_EXIT_INSN(),
1902 },
1903 .errstr_unpriv = "R10 pointer comparison",
1904 .result_unpriv = REJECT,
1905 .result = ACCEPT,
1906 },
1907 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02001908 "unpriv: adding of fp",
1909 .insns = {
1910 BPF_MOV64_IMM(BPF_REG_0, 0),
1911 BPF_MOV64_IMM(BPF_REG_1, 0),
1912 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1913 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
1914 BPF_EXIT_INSN(),
1915 },
1916 .errstr_unpriv = "pointer arithmetic prohibited",
1917 .result_unpriv = REJECT,
1918 .errstr = "R1 invalid mem access",
1919 .result = REJECT,
1920 },
1921 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001922 "unpriv: cmp of stack pointer",
1923 .insns = {
1924 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1927 BPF_MOV64_IMM(BPF_REG_0, 0),
1928 BPF_EXIT_INSN(),
1929 },
1930 .errstr_unpriv = "R2 pointer comparison",
1931 .result_unpriv = REJECT,
1932 .result = ACCEPT,
1933 },
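	/* Stack pointer arithmetic: adding constants (including a known
	 * constant held in a register) to a copy of the frame pointer is
	 * acceptable as long as the resulting stores still land inside
	 * the stack and stay aligned, as in the test below.
	 */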
1934 {
Yonghong Song332270f2017-04-29 22:52:42 -07001935 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001936 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07001937 BPF_MOV64_IMM(BPF_REG_1, 4),
1938 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1939 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
1940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1943 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
1944 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1947 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001948 BPF_MOV64_IMM(BPF_REG_0, 0),
1949 BPF_EXIT_INSN(),
1950 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001951 .result = ACCEPT,
1952 },
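	/* raw_stack tests: bpf_skb_load_bytes() writes into a buffer on
	 * the program stack, so the verifier must check that the pointer
	 * really points into the stack and that the length is sane, but
	 * it must not require the buffer to be initialized beforehand.
	 */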
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001953 {
1954 "raw_stack: no skb_load_bytes",
1955 .insns = {
1956 BPF_MOV64_IMM(BPF_REG_2, 4),
1957 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1959 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1960 BPF_MOV64_IMM(BPF_REG_4, 8),
1961 /* Call to skb_load_bytes() omitted. */
1962 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1963 BPF_EXIT_INSN(),
1964 },
1965 .result = REJECT,
1966 .errstr = "invalid read from stack off -8+0 size 8",
1967 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1968 },
1969 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001970 "raw_stack: skb_load_bytes, negative len",
1971 .insns = {
1972 BPF_MOV64_IMM(BPF_REG_2, 4),
1973 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1975 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1976 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001977 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1978 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001979 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1980 BPF_EXIT_INSN(),
1981 },
1982 .result = REJECT,
1983 .errstr = "invalid stack type R3",
1984 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1985 },
1986 {
1987 "raw_stack: skb_load_bytes, negative len 2",
1988 .insns = {
1989 BPF_MOV64_IMM(BPF_REG_2, 4),
1990 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1992 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1993 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1995 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001996 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1997 BPF_EXIT_INSN(),
1998 },
1999 .result = REJECT,
2000 .errstr = "invalid stack type R3",
2001 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2002 },
2003 {
2004 "raw_stack: skb_load_bytes, zero len",
2005 .insns = {
2006 BPF_MOV64_IMM(BPF_REG_2, 4),
2007 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2009 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2010 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2012 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002013 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2014 BPF_EXIT_INSN(),
2015 },
2016 .result = REJECT,
2017 .errstr = "invalid stack type R3",
2018 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2019 },
2020 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002021 "raw_stack: skb_load_bytes, no init",
2022 .insns = {
2023 BPF_MOV64_IMM(BPF_REG_2, 4),
2024 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2026 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2027 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2029 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2031 BPF_EXIT_INSN(),
2032 },
2033 .result = ACCEPT,
2034 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2035 },
2036 {
2037 "raw_stack: skb_load_bytes, init",
2038 .insns = {
2039 BPF_MOV64_IMM(BPF_REG_2, 4),
2040 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2042 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2043 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2044 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2046 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002047 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2048 BPF_EXIT_INSN(),
2049 },
2050 .result = ACCEPT,
2051 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2052 },
2053 {
2054 "raw_stack: skb_load_bytes, spilled regs around bounds",
2055 .insns = {
2056 BPF_MOV64_IMM(BPF_REG_2, 4),
2057 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002059 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2060 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002061 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2062 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2064 BPF_FUNC_skb_load_bytes),
2065 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2066 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002067 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2068 offsetof(struct __sk_buff, mark)),
2069 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2070 offsetof(struct __sk_buff, priority)),
2071 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2072 BPF_EXIT_INSN(),
2073 },
2074 .result = ACCEPT,
2075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2076 },
2077 {
2078 "raw_stack: skb_load_bytes, spilled regs corruption",
2079 .insns = {
2080 BPF_MOV64_IMM(BPF_REG_2, 4),
2081 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002083 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002084 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2085 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2087 BPF_FUNC_skb_load_bytes),
2088 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002089 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2090 offsetof(struct __sk_buff, mark)),
2091 BPF_EXIT_INSN(),
2092 },
2093 .result = REJECT,
2094 .errstr = "R0 invalid mem access 'inv'",
2095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2096 },
2097 {
2098 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2099 .insns = {
2100 BPF_MOV64_IMM(BPF_REG_2, 4),
2101 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002103 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2104 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2105 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002106 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2107 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002108 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2109 BPF_FUNC_skb_load_bytes),
2110 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2111 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2112 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2114 offsetof(struct __sk_buff, mark)),
2115 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2116 offsetof(struct __sk_buff, priority)),
2117 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2118 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2119 offsetof(struct __sk_buff, pkt_type)),
2120 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2121 BPF_EXIT_INSN(),
2122 },
2123 .result = REJECT,
2124 .errstr = "R3 invalid mem access 'inv'",
2125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2126 },
2127 {
2128 "raw_stack: skb_load_bytes, spilled regs + data",
2129 .insns = {
2130 BPF_MOV64_IMM(BPF_REG_2, 4),
2131 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002133 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2134 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2135 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002136 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2137 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002138 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2139 BPF_FUNC_skb_load_bytes),
2140 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2141 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2142 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2144 offsetof(struct __sk_buff, mark)),
2145 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2146 offsetof(struct __sk_buff, priority)),
2147 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2148 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2149 BPF_EXIT_INSN(),
2150 },
2151 .result = ACCEPT,
2152 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2153 },
2154 {
2155 "raw_stack: skb_load_bytes, invalid access 1",
2156 .insns = {
2157 BPF_MOV64_IMM(BPF_REG_2, 4),
2158 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2160 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2161 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2163 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002164 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2165 BPF_EXIT_INSN(),
2166 },
2167 .result = REJECT,
2168 .errstr = "invalid stack type R3 off=-513 access_size=8",
2169 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2170 },
2171 {
2172 "raw_stack: skb_load_bytes, invalid access 2",
2173 .insns = {
2174 BPF_MOV64_IMM(BPF_REG_2, 4),
2175 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2177 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2178 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2180 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002181 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2182 BPF_EXIT_INSN(),
2183 },
2184 .result = REJECT,
2185 .errstr = "invalid stack type R3 off=-1 access_size=8",
2186 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2187 },
2188 {
2189 "raw_stack: skb_load_bytes, invalid access 3",
2190 .insns = {
2191 BPF_MOV64_IMM(BPF_REG_2, 4),
2192 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2194 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2195 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2197 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2199 BPF_EXIT_INSN(),
2200 },
2201 .result = REJECT,
2202 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2203 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2204 },
2205 {
2206 "raw_stack: skb_load_bytes, invalid access 4",
2207 .insns = {
2208 BPF_MOV64_IMM(BPF_REG_2, 4),
2209 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2211 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2212 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2214 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002215 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2216 BPF_EXIT_INSN(),
2217 },
2218 .result = REJECT,
2219 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2220 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2221 },
2222 {
2223 "raw_stack: skb_load_bytes, invalid access 5",
2224 .insns = {
2225 BPF_MOV64_IMM(BPF_REG_2, 4),
2226 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2228 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2229 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002230 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2231 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002232 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2233 BPF_EXIT_INSN(),
2234 },
2235 .result = REJECT,
2236 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2238 },
2239 {
2240 "raw_stack: skb_load_bytes, invalid access 6",
2241 .insns = {
2242 BPF_MOV64_IMM(BPF_REG_2, 4),
2243 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2245 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2246 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2248 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002249 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2250 BPF_EXIT_INSN(),
2251 },
2252 .result = REJECT,
2253 .errstr = "invalid stack type R3 off=-512 access_size=0",
2254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2255 },
2256 {
2257 "raw_stack: skb_load_bytes, large access",
2258 .insns = {
2259 BPF_MOV64_IMM(BPF_REG_2, 4),
2260 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2262 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2263 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2265 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002266 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2267 BPF_EXIT_INSN(),
2268 },
2269 .result = ACCEPT,
2270 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2271 },
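	/* Direct packet access: loads and stores through skb->data are
	 * only allowed after the access range has been checked against
	 * skb->data_end, and only for program types that expose the
	 * data/data_end fields (e.g. BPF_PROG_TYPE_SCHED_CLS).
	 */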
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002272 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002273 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002274 .insns = {
2275 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2276 offsetof(struct __sk_buff, data)),
2277 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2278 offsetof(struct __sk_buff, data_end)),
2279 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2281 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2282 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2283 BPF_MOV64_IMM(BPF_REG_0, 0),
2284 BPF_EXIT_INSN(),
2285 },
2286 .result = ACCEPT,
2287 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2288 },
2289 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002290 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002291 .insns = {
2292 BPF_MOV64_IMM(BPF_REG_0, 1),
2293 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2294 offsetof(struct __sk_buff, data_end)),
2295 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2296 offsetof(struct __sk_buff, data)),
2297 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2299 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2300 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2301 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2302 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2303 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2304 offsetof(struct __sk_buff, data)),
2305 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2306 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2307 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2308 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2309 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2312 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2313 offsetof(struct __sk_buff, data_end)),
2314 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2315 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2316 BPF_MOV64_IMM(BPF_REG_0, 0),
2317 BPF_EXIT_INSN(),
2318 },
2319 .result = ACCEPT,
2320 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2321 },
2322 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002323 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002324 .insns = {
2325 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2326 offsetof(struct __sk_buff, data)),
2327 BPF_MOV64_IMM(BPF_REG_0, 0),
2328 BPF_EXIT_INSN(),
2329 },
2330 .errstr = "invalid bpf_context access off=76",
2331 .result = REJECT,
2332 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2333 },
2334 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002335 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002336 .insns = {
2337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2338 offsetof(struct __sk_buff, data)),
2339 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2340 offsetof(struct __sk_buff, data_end)),
2341 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2343 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2344 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2345 BPF_MOV64_IMM(BPF_REG_0, 0),
2346 BPF_EXIT_INSN(),
2347 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002348 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2350 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002351 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002352 "direct packet access: test5 (pkt_end >= reg, good access)",
2353 .insns = {
2354 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2355 offsetof(struct __sk_buff, data)),
2356 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2357 offsetof(struct __sk_buff, data_end)),
2358 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2360 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2361 BPF_MOV64_IMM(BPF_REG_0, 1),
2362 BPF_EXIT_INSN(),
2363 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2364 BPF_MOV64_IMM(BPF_REG_0, 0),
2365 BPF_EXIT_INSN(),
2366 },
2367 .result = ACCEPT,
2368 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2369 },
2370 {
2371 "direct packet access: test6 (pkt_end >= reg, bad access)",
2372 .insns = {
2373 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2374 offsetof(struct __sk_buff, data)),
2375 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2376 offsetof(struct __sk_buff, data_end)),
2377 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2379 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2380 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2381 BPF_MOV64_IMM(BPF_REG_0, 1),
2382 BPF_EXIT_INSN(),
2383 BPF_MOV64_IMM(BPF_REG_0, 0),
2384 BPF_EXIT_INSN(),
2385 },
2386 .errstr = "invalid access to packet",
2387 .result = REJECT,
2388 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2389 },
2390 {
2391 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2392 .insns = {
2393 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2394 offsetof(struct __sk_buff, data)),
2395 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2396 offsetof(struct __sk_buff, data_end)),
2397 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2399 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2400 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2401 BPF_MOV64_IMM(BPF_REG_0, 1),
2402 BPF_EXIT_INSN(),
2403 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2404 BPF_MOV64_IMM(BPF_REG_0, 0),
2405 BPF_EXIT_INSN(),
2406 },
2407 .errstr = "invalid access to packet",
2408 .result = REJECT,
2409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2410 },
2411 {
2412 "direct packet access: test8 (double test, variant 1)",
2413 .insns = {
2414 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2415 offsetof(struct __sk_buff, data)),
2416 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2417 offsetof(struct __sk_buff, data_end)),
2418 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2420 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2421 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2422 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2423 BPF_MOV64_IMM(BPF_REG_0, 1),
2424 BPF_EXIT_INSN(),
2425 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2426 BPF_MOV64_IMM(BPF_REG_0, 0),
2427 BPF_EXIT_INSN(),
2428 },
2429 .result = ACCEPT,
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2431 },
2432 {
2433 "direct packet access: test9 (double test, variant 2)",
2434 .insns = {
2435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2436 offsetof(struct __sk_buff, data)),
2437 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2438 offsetof(struct __sk_buff, data_end)),
2439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2441 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2442 BPF_MOV64_IMM(BPF_REG_0, 1),
2443 BPF_EXIT_INSN(),
2444 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2446 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2447 BPF_MOV64_IMM(BPF_REG_0, 0),
2448 BPF_EXIT_INSN(),
2449 },
2450 .result = ACCEPT,
2451 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2452 },
2453 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002454 "direct packet access: test10 (write invalid)",
2455 .insns = {
2456 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2457 offsetof(struct __sk_buff, data)),
2458 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2459 offsetof(struct __sk_buff, data_end)),
2460 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2462 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2463 BPF_MOV64_IMM(BPF_REG_0, 0),
2464 BPF_EXIT_INSN(),
2465 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2466 BPF_MOV64_IMM(BPF_REG_0, 0),
2467 BPF_EXIT_INSN(),
2468 },
2469 .errstr = "invalid access to packet",
2470 .result = REJECT,
2471 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2472 },
2473 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002474 "direct packet access: test11 (shift, good access)",
2475 .insns = {
2476 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2477 offsetof(struct __sk_buff, data)),
2478 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2479 offsetof(struct __sk_buff, data_end)),
2480 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2482 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2483 BPF_MOV64_IMM(BPF_REG_3, 144),
2484 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2486 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2487 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2488 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2489 BPF_MOV64_IMM(BPF_REG_0, 1),
2490 BPF_EXIT_INSN(),
2491 BPF_MOV64_IMM(BPF_REG_0, 0),
2492 BPF_EXIT_INSN(),
2493 },
2494 .result = ACCEPT,
2495 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2496 },
2497 {
2498 "direct packet access: test12 (and, good access)",
2499 .insns = {
2500 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2501 offsetof(struct __sk_buff, data)),
2502 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2503 offsetof(struct __sk_buff, data_end)),
2504 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2506 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2507 BPF_MOV64_IMM(BPF_REG_3, 144),
2508 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2510 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2511 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2512 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2513 BPF_MOV64_IMM(BPF_REG_0, 1),
2514 BPF_EXIT_INSN(),
2515 BPF_MOV64_IMM(BPF_REG_0, 0),
2516 BPF_EXIT_INSN(),
2517 },
2518 .result = ACCEPT,
2519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2520 },
2521 {
2522 "direct packet access: test13 (branches, good access)",
2523 .insns = {
2524 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2525 offsetof(struct __sk_buff, data)),
2526 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2527 offsetof(struct __sk_buff, data_end)),
2528 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2530 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2532 offsetof(struct __sk_buff, mark)),
2533 BPF_MOV64_IMM(BPF_REG_4, 1),
2534 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2535 BPF_MOV64_IMM(BPF_REG_3, 14),
2536 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2537 BPF_MOV64_IMM(BPF_REG_3, 24),
2538 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2540 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2541 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2542 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2543 BPF_MOV64_IMM(BPF_REG_0, 1),
2544 BPF_EXIT_INSN(),
2545 BPF_MOV64_IMM(BPF_REG_0, 0),
2546 BPF_EXIT_INSN(),
2547 },
2548 .result = ACCEPT,
2549 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2550 },
2551 {
William Tu63dfef72017-02-04 08:37:29 -08002552 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2553 .insns = {
2554 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2555 offsetof(struct __sk_buff, data)),
2556 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2557 offsetof(struct __sk_buff, data_end)),
2558 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2560 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2561 BPF_MOV64_IMM(BPF_REG_5, 12),
2562 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2563 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2564 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2565 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2566 BPF_MOV64_IMM(BPF_REG_0, 1),
2567 BPF_EXIT_INSN(),
2568 BPF_MOV64_IMM(BPF_REG_0, 0),
2569 BPF_EXIT_INSN(),
2570 },
2571 .result = ACCEPT,
2572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2573 },
2574 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002575 "direct packet access: test15 (spill with xadd)",
2576 .insns = {
2577 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2578 offsetof(struct __sk_buff, data)),
2579 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2580 offsetof(struct __sk_buff, data_end)),
2581 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2583 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2584 BPF_MOV64_IMM(BPF_REG_5, 4096),
2585 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2587 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2588 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2589 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2590 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2591 BPF_MOV64_IMM(BPF_REG_0, 0),
2592 BPF_EXIT_INSN(),
2593 },
2594 .errstr = "R2 invalid mem access 'inv'",
2595 .result = REJECT,
2596 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2597 },
2598 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002599 "direct packet access: test16 (arith on data_end)",
2600 .insns = {
2601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2602 offsetof(struct __sk_buff, data)),
2603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2604 offsetof(struct __sk_buff, data_end)),
2605 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2608 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2609 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2610 BPF_MOV64_IMM(BPF_REG_0, 0),
2611 BPF_EXIT_INSN(),
2612 },
2613 .errstr = "invalid access to packet",
2614 .result = REJECT,
2615 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2616 },
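	/* Helper access to packet data: when a packet pointer is used as
	 * a helper's memory argument, the verifier needs a proven
	 * [ptr, ptr + size) range below data_end. The tests directly
	 * below use struct xdp_md; the cls tests further down repeat
	 * the same checks for struct __sk_buff.
	 */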
2617 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002618 "helper access to packet: test1, valid packet_ptr range",
2619 .insns = {
2620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2621 offsetof(struct xdp_md, data)),
2622 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2623 offsetof(struct xdp_md, data_end)),
2624 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2626 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2627 BPF_LD_MAP_FD(BPF_REG_1, 0),
2628 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2629 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002630 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2631 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002632 BPF_MOV64_IMM(BPF_REG_0, 0),
2633 BPF_EXIT_INSN(),
2634 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002635 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002636 .result_unpriv = ACCEPT,
2637 .result = ACCEPT,
2638 .prog_type = BPF_PROG_TYPE_XDP,
2639 },
2640 {
2641 "helper access to packet: test2, unchecked packet_ptr",
2642 .insns = {
2643 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2644 offsetof(struct xdp_md, data)),
2645 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2647 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002648 BPF_MOV64_IMM(BPF_REG_0, 0),
2649 BPF_EXIT_INSN(),
2650 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002651 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002652 .result = REJECT,
2653 .errstr = "invalid access to packet",
2654 .prog_type = BPF_PROG_TYPE_XDP,
2655 },
2656 {
2657 "helper access to packet: test3, variable add",
2658 .insns = {
2659 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2660 offsetof(struct xdp_md, data)),
2661 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2662 offsetof(struct xdp_md, data_end)),
2663 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2665 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2666 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2667 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2668 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2669 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2671 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2672 BPF_LD_MAP_FD(BPF_REG_1, 0),
2673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2675 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002676 BPF_MOV64_IMM(BPF_REG_0, 0),
2677 BPF_EXIT_INSN(),
2678 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002679 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002680 .result = ACCEPT,
2681 .prog_type = BPF_PROG_TYPE_XDP,
2682 },
2683 {
2684 "helper access to packet: test4, packet_ptr with bad range",
2685 .insns = {
2686 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2687 offsetof(struct xdp_md, data)),
2688 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2689 offsetof(struct xdp_md, data_end)),
2690 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2692 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2693 BPF_MOV64_IMM(BPF_REG_0, 0),
2694 BPF_EXIT_INSN(),
2695 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002696 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2697 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002698 BPF_MOV64_IMM(BPF_REG_0, 0),
2699 BPF_EXIT_INSN(),
2700 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002701 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002702 .result = REJECT,
2703 .errstr = "invalid access to packet",
2704 .prog_type = BPF_PROG_TYPE_XDP,
2705 },
2706 {
2707 "helper access to packet: test5, packet_ptr with too short range",
2708 .insns = {
2709 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2710 offsetof(struct xdp_md, data)),
2711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2712 offsetof(struct xdp_md, data_end)),
2713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2714 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2716 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2717 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2719 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002720 BPF_MOV64_IMM(BPF_REG_0, 0),
2721 BPF_EXIT_INSN(),
2722 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002723 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002724 .result = REJECT,
2725 .errstr = "invalid access to packet",
2726 .prog_type = BPF_PROG_TYPE_XDP,
2727 },
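	/* Same packet-range checks as above, but for SCHED_CLS programs,
	 * plus tests that helpers such as bpf_skb_store_bytes() and
	 * bpf_skb_load_bytes() may not be given a packet pointer as
	 * their buffer argument, and that bpf_csum_diff() only gets a
	 * valid pointer/length pair.
	 */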
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002728 {
2729 "helper access to packet: test6, cls valid packet_ptr range",
2730 .insns = {
2731 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2732 offsetof(struct __sk_buff, data)),
2733 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2734 offsetof(struct __sk_buff, data_end)),
2735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2737 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2738 BPF_LD_MAP_FD(BPF_REG_1, 0),
2739 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2740 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2742 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002743 BPF_MOV64_IMM(BPF_REG_0, 0),
2744 BPF_EXIT_INSN(),
2745 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002746 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002747 .result = ACCEPT,
2748 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2749 },
2750 {
2751 "helper access to packet: test7, cls unchecked packet_ptr",
2752 .insns = {
2753 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2754 offsetof(struct __sk_buff, data)),
2755 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2757 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002758 BPF_MOV64_IMM(BPF_REG_0, 0),
2759 BPF_EXIT_INSN(),
2760 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002761 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002762 .result = REJECT,
2763 .errstr = "invalid access to packet",
2764 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2765 },
2766 {
2767 "helper access to packet: test8, cls variable add",
2768 .insns = {
2769 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2770 offsetof(struct __sk_buff, data)),
2771 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2772 offsetof(struct __sk_buff, data_end)),
2773 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2775 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2776 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2777 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2778 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2779 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2781 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2782 BPF_LD_MAP_FD(BPF_REG_1, 0),
2783 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002784 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2785 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002786 BPF_MOV64_IMM(BPF_REG_0, 0),
2787 BPF_EXIT_INSN(),
2788 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002789 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002790 .result = ACCEPT,
2791 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2792 },
2793 {
2794 "helper access to packet: test9, cls packet_ptr with bad range",
2795 .insns = {
2796 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2797 offsetof(struct __sk_buff, data)),
2798 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2799 offsetof(struct __sk_buff, data_end)),
2800 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2802 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2803 BPF_MOV64_IMM(BPF_REG_0, 0),
2804 BPF_EXIT_INSN(),
2805 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2807 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002808 BPF_MOV64_IMM(BPF_REG_0, 0),
2809 BPF_EXIT_INSN(),
2810 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002811 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002812 .result = REJECT,
2813 .errstr = "invalid access to packet",
2814 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2815 },
2816 {
2817 "helper access to packet: test10, cls packet_ptr with too short range",
2818 .insns = {
2819 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2820 offsetof(struct __sk_buff, data)),
2821 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2822 offsetof(struct __sk_buff, data_end)),
2823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2824 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2826 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2827 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2829 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002830 BPF_MOV64_IMM(BPF_REG_0, 0),
2831 BPF_EXIT_INSN(),
2832 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002833 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002834 .result = REJECT,
2835 .errstr = "invalid access to packet",
2836 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2837 },
2838 {
2839 "helper access to packet: test11, cls unsuitable helper 1",
2840 .insns = {
2841 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2842 offsetof(struct __sk_buff, data)),
2843 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2844 offsetof(struct __sk_buff, data_end)),
2845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2846 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2848 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2849 BPF_MOV64_IMM(BPF_REG_2, 0),
2850 BPF_MOV64_IMM(BPF_REG_4, 42),
2851 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2853 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002854 BPF_MOV64_IMM(BPF_REG_0, 0),
2855 BPF_EXIT_INSN(),
2856 },
2857 .result = REJECT,
2858 .errstr = "helper access to the packet",
2859 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2860 },
2861 {
2862 "helper access to packet: test12, cls unsuitable helper 2",
2863 .insns = {
2864 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2865 offsetof(struct __sk_buff, data)),
2866 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2867 offsetof(struct __sk_buff, data_end)),
2868 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2870 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2871 BPF_MOV64_IMM(BPF_REG_2, 0),
2872 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002873 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2874 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002875 BPF_MOV64_IMM(BPF_REG_0, 0),
2876 BPF_EXIT_INSN(),
2877 },
2878 .result = REJECT,
2879 .errstr = "helper access to the packet",
2880 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2881 },
2882 {
2883 "helper access to packet: test13, cls helper ok",
2884 .insns = {
2885 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2886 offsetof(struct __sk_buff, data)),
2887 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2888 offsetof(struct __sk_buff, data_end)),
2889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2890 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2892 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2894 BPF_MOV64_IMM(BPF_REG_2, 4),
2895 BPF_MOV64_IMM(BPF_REG_3, 0),
2896 BPF_MOV64_IMM(BPF_REG_4, 0),
2897 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2899 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002900 BPF_MOV64_IMM(BPF_REG_0, 0),
2901 BPF_EXIT_INSN(),
2902 },
2903 .result = ACCEPT,
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 },
2906 {
2907 "helper access to packet: test14, cls helper fail sub",
2908 .insns = {
2909 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2910 offsetof(struct __sk_buff, data)),
2911 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2912 offsetof(struct __sk_buff, data_end)),
2913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2914 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2916 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2917 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2918 BPF_MOV64_IMM(BPF_REG_2, 4),
2919 BPF_MOV64_IMM(BPF_REG_3, 0),
2920 BPF_MOV64_IMM(BPF_REG_4, 0),
2921 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2923 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002924 BPF_MOV64_IMM(BPF_REG_0, 0),
2925 BPF_EXIT_INSN(),
2926 },
2927 .result = REJECT,
2928 .errstr = "type=inv expected=fp",
2929 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2930 },
2931 {
2932 "helper access to packet: test15, cls helper fail range 1",
2933 .insns = {
2934 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2935 offsetof(struct __sk_buff, data)),
2936 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2937 offsetof(struct __sk_buff, data_end)),
2938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2941 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2943 BPF_MOV64_IMM(BPF_REG_2, 8),
2944 BPF_MOV64_IMM(BPF_REG_3, 0),
2945 BPF_MOV64_IMM(BPF_REG_4, 0),
2946 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2948 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002949 BPF_MOV64_IMM(BPF_REG_0, 0),
2950 BPF_EXIT_INSN(),
2951 },
2952 .result = REJECT,
2953 .errstr = "invalid access to packet",
2954 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2955 },
2956 {
2957 "helper access to packet: test16, cls helper fail range 2",
2958 .insns = {
2959 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2960 offsetof(struct __sk_buff, data)),
2961 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2962 offsetof(struct __sk_buff, data_end)),
2963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2964 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2966 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2968 BPF_MOV64_IMM(BPF_REG_2, -9),
2969 BPF_MOV64_IMM(BPF_REG_3, 0),
2970 BPF_MOV64_IMM(BPF_REG_4, 0),
2971 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2973 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002974 BPF_MOV64_IMM(BPF_REG_0, 0),
2975 BPF_EXIT_INSN(),
2976 },
2977 .result = REJECT,
2978 .errstr = "invalid access to packet",
2979 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2980 },
2981 {
2982 "helper access to packet: test17, cls helper fail range 3",
2983 .insns = {
2984 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2985 offsetof(struct __sk_buff, data)),
2986 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2987 offsetof(struct __sk_buff, data_end)),
2988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2989 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2991 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2993 BPF_MOV64_IMM(BPF_REG_2, ~0),
2994 BPF_MOV64_IMM(BPF_REG_3, 0),
2995 BPF_MOV64_IMM(BPF_REG_4, 0),
2996 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2998 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002999 BPF_MOV64_IMM(BPF_REG_0, 0),
3000 BPF_EXIT_INSN(),
3001 },
3002 .result = REJECT,
3003 .errstr = "invalid access to packet",
3004 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3005 },
3006 {
3007 "helper access to packet: test18, cls helper fail range zero",
3008 .insns = {
3009 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3010 offsetof(struct __sk_buff, data)),
3011 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3012 offsetof(struct __sk_buff, data_end)),
3013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3016 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3018 BPF_MOV64_IMM(BPF_REG_2, 0),
3019 BPF_MOV64_IMM(BPF_REG_3, 0),
3020 BPF_MOV64_IMM(BPF_REG_4, 0),
3021 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3023 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003024 BPF_MOV64_IMM(BPF_REG_0, 0),
3025 BPF_EXIT_INSN(),
3026 },
3027 .result = REJECT,
3028 .errstr = "invalid access to packet",
3029 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3030 },
3031 {
3032 "helper access to packet: test19, pkt end as input",
3033 .insns = {
3034 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3035 offsetof(struct __sk_buff, data)),
3036 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3037 offsetof(struct __sk_buff, data_end)),
3038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3039 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3041 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3042 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3043 BPF_MOV64_IMM(BPF_REG_2, 4),
3044 BPF_MOV64_IMM(BPF_REG_3, 0),
3045 BPF_MOV64_IMM(BPF_REG_4, 0),
3046 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003047 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3048 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003049 BPF_MOV64_IMM(BPF_REG_0, 0),
3050 BPF_EXIT_INSN(),
3051 },
3052 .result = REJECT,
3053 .errstr = "R1 type=pkt_end expected=fp",
3054 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3055 },
3056 {
3057 "helper access to packet: test20, wrong reg",
3058 .insns = {
3059 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3060 offsetof(struct __sk_buff, data)),
3061 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3062 offsetof(struct __sk_buff, data_end)),
3063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3064 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3066 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3067 BPF_MOV64_IMM(BPF_REG_2, 4),
3068 BPF_MOV64_IMM(BPF_REG_3, 0),
3069 BPF_MOV64_IMM(BPF_REG_4, 0),
3070 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003071 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3072 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003073 BPF_MOV64_IMM(BPF_REG_0, 0),
3074 BPF_EXIT_INSN(),
3075 },
3076 .result = REJECT,
3077 .errstr = "invalid access to packet",
3078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3079 },
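	/* Map value access: a successful bpf_map_lookup_elem() yields a
	 * pointer to the value, and any constant or register offset
	 * added to it must stay within value_size. Unprivileged programs
	 * are additionally rejected for leaking the address or doing
	 * pointer arithmetic on it (see errstr_unpriv below).
	 */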
Josef Bacik48461132016-09-28 10:54:32 -04003080 {
3081 "valid map access into an array with a constant",
3082 .insns = {
3083 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3086 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3088 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003090 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3091 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003092 BPF_EXIT_INSN(),
3093 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003094 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003095 .errstr_unpriv = "R0 leaks addr",
3096 .result_unpriv = REJECT,
3097 .result = ACCEPT,
3098 },
3099 {
3100 "valid map access into an array with a register",
3101 .insns = {
3102 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3105 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3107 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3109 BPF_MOV64_IMM(BPF_REG_1, 4),
3110 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3111 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003112 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3113 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003114 BPF_EXIT_INSN(),
3115 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003116 .fixup_map2 = { 3 },
3117 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003118 .result_unpriv = REJECT,
3119 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003120 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003121 },
3122 {
3123 "valid map access into an array with a variable",
3124 .insns = {
3125 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3126 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3128 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3130 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003131 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3132 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3133 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3134 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3135 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003136 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3137 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003138 BPF_EXIT_INSN(),
3139 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003140 .fixup_map2 = { 3 },
3141 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003142 .result_unpriv = REJECT,
3143 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003144 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003145 },
3146 {
3147 "valid map access into an array with a signed variable",
3148 .insns = {
3149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3152 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3154 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3156 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3157 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3158 BPF_MOV32_IMM(BPF_REG_1, 0),
3159 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3160 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3161 BPF_MOV32_IMM(BPF_REG_1, 0),
3162 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3163 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003164 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3165 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003166 BPF_EXIT_INSN(),
3167 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003168 .fixup_map2 = { 3 },
3169 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003170 .result_unpriv = REJECT,
3171 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003172 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003173 },
3174 {
3175 "invalid map access into an array with a constant",
3176 .insns = {
3177 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3178 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3180 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3182 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003183 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3184 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3185 offsetof(struct test_val, foo)),
3186 BPF_EXIT_INSN(),
3187 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003188 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003189 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3190 .result = REJECT,
3191 },
3192 {
3193 "invalid map access into an array with a register",
3194 .insns = {
3195 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3196 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3198 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3200 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003201 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3202 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3203 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3204 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003205 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3206 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003207 BPF_EXIT_INSN(),
3208 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003209 .fixup_map2 = { 3 },
3210 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003211 .errstr = "R0 min value is outside of the array range",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003212 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003213 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003214 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003215 },
3216 {
3217 "invalid map access into an array with a variable",
3218 .insns = {
3219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3222 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3224 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3226 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3227 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3228 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003229 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3230 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003231 BPF_EXIT_INSN(),
3232 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003233 .fixup_map2 = { 3 },
3234 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003235 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003236 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003237 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003238 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003239 },
3240 {
3241 "invalid map access into an array with no floor check",
3242 .insns = {
3243 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3244 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3246 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3248 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003249 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3250 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3251 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3252 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3253 BPF_MOV32_IMM(BPF_REG_1, 0),
3254 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3255 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003256 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3257 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003258 BPF_EXIT_INSN(),
3259 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003260 .fixup_map2 = { 3 },
3261 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003262 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003263 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003264 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003265 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003266 },
3267 {
3268 "invalid map access into an array with a invalid max check",
3269 .insns = {
3270 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3273 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3275 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3277 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3278 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3279 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3280 BPF_MOV32_IMM(BPF_REG_1, 0),
3281 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3282 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003283 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3284 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003285 BPF_EXIT_INSN(),
3286 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003287 .fixup_map2 = { 3 },
3288 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003289 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003290 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003291 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003292 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003293 },
3294 {
3295 "invalid map access into an array with a invalid max check",
3296 .insns = {
3297 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3298 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3300 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3302 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003303 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3304 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3305 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3306 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3308 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3310 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003311 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3312 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003313 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3314 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003315 BPF_EXIT_INSN(),
3316 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003317 .fixup_map2 = { 3, 11 },
3318 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003319 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003320 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003321 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003322 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003323 },
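	/* The next tests check how the verifier tracks the result of
	 * bpf_map_lookup_elem() (map value or NULL) when it is copied into
	 * other registers, NULL-checked through only one copy, or clobbered
	 * by a second helper call.
	 */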
Thomas Graf57a09bf2016-10-18 19:51:19 +02003324 {
3325 "multiple registers share map_lookup_elem result",
3326 .insns = {
3327 BPF_MOV64_IMM(BPF_REG_1, 10),
3328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3331 BPF_LD_MAP_FD(BPF_REG_1, 0),
3332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3333 BPF_FUNC_map_lookup_elem),
3334 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3335 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3336 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3337 BPF_EXIT_INSN(),
3338 },
3339 .fixup_map1 = { 4 },
3340 .result = ACCEPT,
3341 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3342 },
3343 {
3344 "invalid memory access with multiple map_lookup_elem calls",
3345 .insns = {
3346 BPF_MOV64_IMM(BPF_REG_1, 10),
3347 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3348 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3350 BPF_LD_MAP_FD(BPF_REG_1, 0),
3351 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3352 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3354 BPF_FUNC_map_lookup_elem),
3355 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3356 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3357 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3359 BPF_FUNC_map_lookup_elem),
3360 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3361 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3362 BPF_EXIT_INSN(),
3363 },
3364 .fixup_map1 = { 4 },
3365 .result = REJECT,
3366 .errstr = "R4 !read_ok",
3367 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3368 },
3369 {
3370 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3371 .insns = {
3372 BPF_MOV64_IMM(BPF_REG_1, 10),
3373 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3374 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3376 BPF_LD_MAP_FD(BPF_REG_1, 0),
3377 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3378 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3380 BPF_FUNC_map_lookup_elem),
3381 BPF_MOV64_IMM(BPF_REG_2, 10),
3382 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3384 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3386 BPF_FUNC_map_lookup_elem),
3387 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3389 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3390 BPF_EXIT_INSN(),
3391 },
3392 .fixup_map1 = { 4 },
3393 .result = ACCEPT,
3394 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3395 },
Josef Bacike9548902016-11-29 12:35:19 -05003396 {
Daniel Borkmanna08dd0d2016-12-15 01:30:06 +01003397 "multiple registers share map_lookup_elem bad reg type",
3398 .insns = {
3399 BPF_MOV64_IMM(BPF_REG_1, 10),
3400 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3403 BPF_LD_MAP_FD(BPF_REG_1, 0),
3404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3405 BPF_FUNC_map_lookup_elem),
3406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3407 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3408 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3409 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3410 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3411 BPF_MOV64_IMM(BPF_REG_1, 1),
3412 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3413 BPF_MOV64_IMM(BPF_REG_1, 2),
3414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3415 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3416 BPF_MOV64_IMM(BPF_REG_1, 3),
3417 BPF_EXIT_INSN(),
3418 },
3419 .fixup_map1 = { 4 },
3420 .result = REJECT,
3421 .errstr = "R3 invalid mem access 'inv'",
3422 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3423 },
3424 {
Josef Bacike9548902016-11-29 12:35:19 -05003425 "invalid map access from else condition",
3426 .insns = {
3427 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3430 BPF_LD_MAP_FD(BPF_REG_1, 0),
3431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3432 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3433 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3434 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3436 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3437 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3438 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3439 BPF_EXIT_INSN(),
3440 },
3441 .fixup_map2 = { 3 },
3442 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3443 .result = REJECT,
3444 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3445 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003446 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05003447 },
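	/* ORing an immediate or a known constant into a register must keep
	 * it a known constant usable as a helper size argument, while still
	 * failing the stack bounds check once the resulting size is too big.
	 */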
Gianluca Borello3c8397442016-12-03 12:31:33 -08003448 {
3449 "constant register |= constant should keep constant type",
3450 .insns = {
3451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3453 BPF_MOV64_IMM(BPF_REG_2, 34),
3454 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3455 BPF_MOV64_IMM(BPF_REG_3, 0),
3456 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3457 BPF_EXIT_INSN(),
3458 },
3459 .result = ACCEPT,
3460 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3461 },
3462 {
3463 "constant register |= constant should not bypass stack boundary checks",
3464 .insns = {
3465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3467 BPF_MOV64_IMM(BPF_REG_2, 34),
3468 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3469 BPF_MOV64_IMM(BPF_REG_3, 0),
3470 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3471 BPF_EXIT_INSN(),
3472 },
3473 .errstr = "invalid stack type R1 off=-48 access_size=58",
3474 .result = REJECT,
3475 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3476 },
3477 {
3478 "constant register |= constant register should keep constant type",
3479 .insns = {
3480 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3482 BPF_MOV64_IMM(BPF_REG_2, 34),
3483 BPF_MOV64_IMM(BPF_REG_4, 13),
3484 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3485 BPF_MOV64_IMM(BPF_REG_3, 0),
3486 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3487 BPF_EXIT_INSN(),
3488 },
3489 .result = ACCEPT,
3490 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3491 },
3492 {
3493 "constant register |= constant register should not bypass stack boundary checks",
3494 .insns = {
3495 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3497 BPF_MOV64_IMM(BPF_REG_2, 34),
3498 BPF_MOV64_IMM(BPF_REG_4, 24),
3499 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3500 BPF_MOV64_IMM(BPF_REG_3, 0),
3501 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3502 BPF_EXIT_INSN(),
3503 },
3504 .errstr = "invalid stack type R1 off=-48 access_size=58",
3505 .result = REJECT,
3506 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3507 },
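	/* LWT program types: direct packet reads are allowed for LWT_IN,
	 * LWT_OUT and LWT_XMIT, direct packet writes only for LWT_XMIT,
	 * and skb->tc_classid is not accessible from any of them.
	 */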
Thomas Graf3f731d82016-12-05 10:30:52 +01003508 {
3509 "invalid direct packet write for LWT_IN",
3510 .insns = {
3511 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3512 offsetof(struct __sk_buff, data)),
3513 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3514 offsetof(struct __sk_buff, data_end)),
3515 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3517 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3518 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3519 BPF_MOV64_IMM(BPF_REG_0, 0),
3520 BPF_EXIT_INSN(),
3521 },
3522 .errstr = "cannot write into packet",
3523 .result = REJECT,
3524 .prog_type = BPF_PROG_TYPE_LWT_IN,
3525 },
3526 {
3527 "invalid direct packet write for LWT_OUT",
3528 .insns = {
3529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3530 offsetof(struct __sk_buff, data)),
3531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3532 offsetof(struct __sk_buff, data_end)),
3533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3535 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3536 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3537 BPF_MOV64_IMM(BPF_REG_0, 0),
3538 BPF_EXIT_INSN(),
3539 },
3540 .errstr = "cannot write into packet",
3541 .result = REJECT,
3542 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3543 },
3544 {
3545 "direct packet write for LWT_XMIT",
3546 .insns = {
3547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3548 offsetof(struct __sk_buff, data)),
3549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3550 offsetof(struct __sk_buff, data_end)),
3551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3553 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3554 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3555 BPF_MOV64_IMM(BPF_REG_0, 0),
3556 BPF_EXIT_INSN(),
3557 },
3558 .result = ACCEPT,
3559 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3560 },
3561 {
3562 "direct packet read for LWT_IN",
3563 .insns = {
3564 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3565 offsetof(struct __sk_buff, data)),
3566 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3567 offsetof(struct __sk_buff, data_end)),
3568 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3570 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3571 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3572 BPF_MOV64_IMM(BPF_REG_0, 0),
3573 BPF_EXIT_INSN(),
3574 },
3575 .result = ACCEPT,
3576 .prog_type = BPF_PROG_TYPE_LWT_IN,
3577 },
3578 {
3579 "direct packet read for LWT_OUT",
3580 .insns = {
3581 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3582 offsetof(struct __sk_buff, data)),
3583 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3584 offsetof(struct __sk_buff, data_end)),
3585 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3587 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3588 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3589 BPF_MOV64_IMM(BPF_REG_0, 0),
3590 BPF_EXIT_INSN(),
3591 },
3592 .result = ACCEPT,
3593 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3594 },
3595 {
3596 "direct packet read for LWT_XMIT",
3597 .insns = {
3598 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3599 offsetof(struct __sk_buff, data)),
3600 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3601 offsetof(struct __sk_buff, data_end)),
3602 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3604 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3605 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3606 BPF_MOV64_IMM(BPF_REG_0, 0),
3607 BPF_EXIT_INSN(),
3608 },
3609 .result = ACCEPT,
3610 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3611 },
3612 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07003613 "overlapping checks for direct packet access",
3614 .insns = {
3615 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3616 offsetof(struct __sk_buff, data)),
3617 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3618 offsetof(struct __sk_buff, data_end)),
3619 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3621 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3624 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3625 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3626 BPF_MOV64_IMM(BPF_REG_0, 0),
3627 BPF_EXIT_INSN(),
3628 },
3629 .result = ACCEPT,
3630 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3631 },
3632 {
Thomas Graf3f731d82016-12-05 10:30:52 +01003633 "invalid access of tc_classid for LWT_IN",
3634 .insns = {
3635 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3636 offsetof(struct __sk_buff, tc_classid)),
3637 BPF_EXIT_INSN(),
3638 },
3639 .result = REJECT,
3640 .errstr = "invalid bpf_context access",
3641 },
3642 {
3643 "invalid access of tc_classid for LWT_OUT",
3644 .insns = {
3645 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3646 offsetof(struct __sk_buff, tc_classid)),
3647 BPF_EXIT_INSN(),
3648 },
3649 .result = REJECT,
3650 .errstr = "invalid bpf_context access",
3651 },
3652 {
3653 "invalid access of tc_classid for LWT_XMIT",
3654 .insns = {
3655 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3656 offsetof(struct __sk_buff, tc_classid)),
3657 BPF_EXIT_INSN(),
3658 },
3659 .result = REJECT,
3660 .errstr = "invalid bpf_context access",
3661 },
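	/* Helper access to map values (via bpf_probe_read() here) with fixed
	 * sizes: full, partial, empty, out-of-bound and negative ranges, also
	 * for map value pointers adjusted by constant or variable offsets.
	 */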
Gianluca Borello57225692017-01-09 10:19:47 -08003662 {
3663 "helper access to map: full range",
3664 .insns = {
3665 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3667 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3668 BPF_LD_MAP_FD(BPF_REG_1, 0),
3669 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3671 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3672 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3673 BPF_MOV64_IMM(BPF_REG_3, 0),
3674 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3675 BPF_EXIT_INSN(),
3676 },
3677 .fixup_map2 = { 3 },
3678 .result = ACCEPT,
3679 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3680 },
3681 {
3682 "helper access to map: partial range",
3683 .insns = {
3684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3686 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3687 BPF_LD_MAP_FD(BPF_REG_1, 0),
3688 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3691 BPF_MOV64_IMM(BPF_REG_2, 8),
3692 BPF_MOV64_IMM(BPF_REG_3, 0),
3693 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3694 BPF_EXIT_INSN(),
3695 },
3696 .fixup_map2 = { 3 },
3697 .result = ACCEPT,
3698 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3699 },
3700 {
3701 "helper access to map: empty range",
3702 .insns = {
3703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3705 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3706 BPF_LD_MAP_FD(BPF_REG_1, 0),
3707 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3708 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3710 BPF_MOV64_IMM(BPF_REG_2, 0),
3711 BPF_MOV64_IMM(BPF_REG_3, 0),
3712 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3713 BPF_EXIT_INSN(),
3714 },
3715 .fixup_map2 = { 3 },
3716 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3717 .result = REJECT,
3718 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3719 },
3720 {
3721 "helper access to map: out-of-bound range",
3722 .insns = {
3723 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3725 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3726 BPF_LD_MAP_FD(BPF_REG_1, 0),
3727 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3728 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3729 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3730 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3731 BPF_MOV64_IMM(BPF_REG_3, 0),
3732 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3733 BPF_EXIT_INSN(),
3734 },
3735 .fixup_map2 = { 3 },
3736 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3737 .result = REJECT,
3738 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3739 },
3740 {
3741 "helper access to map: negative range",
3742 .insns = {
3743 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3745 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3746 BPF_LD_MAP_FD(BPF_REG_1, 0),
3747 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3749 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3750 BPF_MOV64_IMM(BPF_REG_2, -8),
3751 BPF_MOV64_IMM(BPF_REG_3, 0),
3752 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3753 BPF_EXIT_INSN(),
3754 },
3755 .fixup_map2 = { 3 },
3756 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3757 .result = REJECT,
3758 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3759 },
3760 {
3761 "helper access to adjusted map (via const imm): full range",
3762 .insns = {
3763 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3765 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3766 BPF_LD_MAP_FD(BPF_REG_1, 0),
3767 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3769 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3771 offsetof(struct test_val, foo)),
3772 BPF_MOV64_IMM(BPF_REG_2,
3773 sizeof(struct test_val) -
3774 offsetof(struct test_val, foo)),
3775 BPF_MOV64_IMM(BPF_REG_3, 0),
3776 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3777 BPF_EXIT_INSN(),
3778 },
3779 .fixup_map2 = { 3 },
3780 .result = ACCEPT,
3781 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3782 },
3783 {
3784 "helper access to adjusted map (via const imm): partial range",
3785 .insns = {
3786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3788 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3789 BPF_LD_MAP_FD(BPF_REG_1, 0),
3790 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3794 offsetof(struct test_val, foo)),
3795 BPF_MOV64_IMM(BPF_REG_2, 8),
3796 BPF_MOV64_IMM(BPF_REG_3, 0),
3797 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3798 BPF_EXIT_INSN(),
3799 },
3800 .fixup_map2 = { 3 },
3801 .result = ACCEPT,
3802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3803 },
3804 {
3805 "helper access to adjusted map (via const imm): empty range",
3806 .insns = {
3807 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3809 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3810 BPF_LD_MAP_FD(BPF_REG_1, 0),
3811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3812 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3813 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3815 offsetof(struct test_val, foo)),
3816 BPF_MOV64_IMM(BPF_REG_2, 0),
3817 BPF_MOV64_IMM(BPF_REG_3, 0),
3818 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3819 BPF_EXIT_INSN(),
3820 },
3821 .fixup_map2 = { 3 },
3822 .errstr = "R1 min value is outside of the array range",
3823 .result = REJECT,
3824 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3825 },
3826 {
3827 "helper access to adjusted map (via const imm): out-of-bound range",
3828 .insns = {
3829 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3831 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3832 BPF_LD_MAP_FD(BPF_REG_1, 0),
3833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3837 offsetof(struct test_val, foo)),
3838 BPF_MOV64_IMM(BPF_REG_2,
3839 sizeof(struct test_val) -
3840 offsetof(struct test_val, foo) + 8),
3841 BPF_MOV64_IMM(BPF_REG_3, 0),
3842 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3843 BPF_EXIT_INSN(),
3844 },
3845 .fixup_map2 = { 3 },
3846 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3847 .result = REJECT,
3848 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3849 },
3850 {
3851 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3852 .insns = {
3853 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3855 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3856 BPF_LD_MAP_FD(BPF_REG_1, 0),
3857 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3858 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3859 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3861 offsetof(struct test_val, foo)),
3862 BPF_MOV64_IMM(BPF_REG_2, -8),
3863 BPF_MOV64_IMM(BPF_REG_3, 0),
3864 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3865 BPF_EXIT_INSN(),
3866 },
3867 .fixup_map2 = { 3 },
3868 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3869 .result = REJECT,
3870 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3871 },
3872 {
3873 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3874 .insns = {
3875 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3877 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3878 BPF_LD_MAP_FD(BPF_REG_1, 0),
3879 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3880 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3883 offsetof(struct test_val, foo)),
3884 BPF_MOV64_IMM(BPF_REG_2, -1),
3885 BPF_MOV64_IMM(BPF_REG_3, 0),
3886 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3887 BPF_EXIT_INSN(),
3888 },
3889 .fixup_map2 = { 3 },
3890 .errstr = "R1 min value is outside of the array range",
3891 .result = REJECT,
3892 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3893 },
3894 {
3895 "helper access to adjusted map (via const reg): full range",
3896 .insns = {
3897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3899 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3900 BPF_LD_MAP_FD(BPF_REG_1, 0),
3901 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3904 BPF_MOV64_IMM(BPF_REG_3,
3905 offsetof(struct test_val, foo)),
3906 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3907 BPF_MOV64_IMM(BPF_REG_2,
3908 sizeof(struct test_val) -
3909 offsetof(struct test_val, foo)),
3910 BPF_MOV64_IMM(BPF_REG_3, 0),
3911 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3912 BPF_EXIT_INSN(),
3913 },
3914 .fixup_map2 = { 3 },
3915 .result = ACCEPT,
3916 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3917 },
3918 {
3919 "helper access to adjusted map (via const reg): partial range",
3920 .insns = {
3921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3923 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3924 BPF_LD_MAP_FD(BPF_REG_1, 0),
3925 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3928 BPF_MOV64_IMM(BPF_REG_3,
3929 offsetof(struct test_val, foo)),
3930 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3931 BPF_MOV64_IMM(BPF_REG_2, 8),
3932 BPF_MOV64_IMM(BPF_REG_3, 0),
3933 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3934 BPF_EXIT_INSN(),
3935 },
3936 .fixup_map2 = { 3 },
3937 .result = ACCEPT,
3938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3939 },
3940 {
3941 "helper access to adjusted map (via const reg): empty range",
3942 .insns = {
3943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3946 BPF_LD_MAP_FD(BPF_REG_1, 0),
3947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3950 BPF_MOV64_IMM(BPF_REG_3, 0),
3951 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3952 BPF_MOV64_IMM(BPF_REG_2, 0),
3953 BPF_MOV64_IMM(BPF_REG_3, 0),
3954 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3955 BPF_EXIT_INSN(),
3956 },
3957 .fixup_map2 = { 3 },
3958 .errstr = "R1 min value is outside of the array range",
3959 .result = REJECT,
3960 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3961 },
3962 {
3963 "helper access to adjusted map (via const reg): out-of-bound range",
3964 .insns = {
3965 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3966 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3967 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3968 BPF_LD_MAP_FD(BPF_REG_1, 0),
3969 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3970 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3972 BPF_MOV64_IMM(BPF_REG_3,
3973 offsetof(struct test_val, foo)),
3974 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3975 BPF_MOV64_IMM(BPF_REG_2,
3976 sizeof(struct test_val) -
3977 offsetof(struct test_val, foo) + 8),
3978 BPF_MOV64_IMM(BPF_REG_3, 0),
3979 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3980 BPF_EXIT_INSN(),
3981 },
3982 .fixup_map2 = { 3 },
3983 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3984 .result = REJECT,
3985 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3986 },
3987 {
3988 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3989 .insns = {
3990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3992 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3993 BPF_LD_MAP_FD(BPF_REG_1, 0),
3994 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3996 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3997 BPF_MOV64_IMM(BPF_REG_3,
3998 offsetof(struct test_val, foo)),
3999 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4000 BPF_MOV64_IMM(BPF_REG_2, -8),
4001 BPF_MOV64_IMM(BPF_REG_3, 0),
4002 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4003 BPF_EXIT_INSN(),
4004 },
4005 .fixup_map2 = { 3 },
4006 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
4007 .result = REJECT,
4008 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4009 },
4010 {
4011 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4012 .insns = {
4013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4015 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4016 BPF_LD_MAP_FD(BPF_REG_1, 0),
4017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4020 BPF_MOV64_IMM(BPF_REG_3,
4021 offsetof(struct test_val, foo)),
4022 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4023 BPF_MOV64_IMM(BPF_REG_2, -1),
4024 BPF_MOV64_IMM(BPF_REG_3, 0),
4025 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4026 BPF_EXIT_INSN(),
4027 },
4028 .fixup_map2 = { 3 },
4029 .errstr = "R1 min value is outside of the array range",
4030 .result = REJECT,
4031 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4032 },
4033 {
4034 "helper access to adjusted map (via variable): full range",
4035 .insns = {
4036 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4038 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4039 BPF_LD_MAP_FD(BPF_REG_1, 0),
4040 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4041 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4042 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4043 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4044 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4045 offsetof(struct test_val, foo), 4),
4046 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4047 BPF_MOV64_IMM(BPF_REG_2,
4048 sizeof(struct test_val) -
4049 offsetof(struct test_val, foo)),
4050 BPF_MOV64_IMM(BPF_REG_3, 0),
4051 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4052 BPF_EXIT_INSN(),
4053 },
4054 .fixup_map2 = { 3 },
4055 .result = ACCEPT,
4056 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4057 },
4058 {
4059 "helper access to adjusted map (via variable): partial range",
4060 .insns = {
4061 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4063 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4064 BPF_LD_MAP_FD(BPF_REG_1, 0),
4065 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4066 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4067 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4068 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4069 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4070 offsetof(struct test_val, foo), 4),
4071 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4072 BPF_MOV64_IMM(BPF_REG_2, 8),
4073 BPF_MOV64_IMM(BPF_REG_3, 0),
4074 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4075 BPF_EXIT_INSN(),
4076 },
4077 .fixup_map2 = { 3 },
4078 .result = ACCEPT,
4079 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4080 },
4081 {
4082 "helper access to adjusted map (via variable): empty range",
4083 .insns = {
4084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4087 BPF_LD_MAP_FD(BPF_REG_1, 0),
4088 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4090 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4091 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4092 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4093 offsetof(struct test_val, foo), 4),
4094 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4095 BPF_MOV64_IMM(BPF_REG_2, 0),
4096 BPF_MOV64_IMM(BPF_REG_3, 0),
4097 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4098 BPF_EXIT_INSN(),
4099 },
4100 .fixup_map2 = { 3 },
4101 .errstr = "R1 min value is outside of the array range",
4102 .result = REJECT,
4103 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4104 },
4105 {
4106 "helper access to adjusted map (via variable): no max check",
4107 .insns = {
4108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4110 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4111 BPF_LD_MAP_FD(BPF_REG_1, 0),
4112 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4115 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4116 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4117 BPF_MOV64_IMM(BPF_REG_2, 0),
4118 BPF_MOV64_IMM(BPF_REG_3, 0),
4119 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4120 BPF_EXIT_INSN(),
4121 },
4122 .fixup_map2 = { 3 },
4123 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
4124 .result = REJECT,
4125 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4126 },
4127 {
4128 "helper access to adjusted map (via variable): wrong max check",
4129 .insns = {
4130 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4132 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4133 BPF_LD_MAP_FD(BPF_REG_1, 0),
4134 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4135 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4136 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4138 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4139 offsetof(struct test_val, foo), 4),
4140 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4141 BPF_MOV64_IMM(BPF_REG_2,
4142 sizeof(struct test_val) -
4143 offsetof(struct test_val, foo) + 1),
4144 BPF_MOV64_IMM(BPF_REG_3, 0),
4145 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4146 BPF_EXIT_INSN(),
4147 },
4148 .fixup_map2 = { 3 },
4149 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4150 .result = REJECT,
4151 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4152 },
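	/* Map value pointer tests: spilling to and restoring from the stack,
	 * unaligned loads/stores, and ALU operations that make the verifier
	 * lose track of the pointer type.
	 */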
Gianluca Borellof0318d02017-01-09 10:19:48 -08004153 {
4154 "map element value is preserved across register spilling",
4155 .insns = {
4156 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4158 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4159 BPF_LD_MAP_FD(BPF_REG_1, 0),
4160 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4162 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4163 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4165 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4166 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4167 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4168 BPF_EXIT_INSN(),
4169 },
4170 .fixup_map2 = { 3 },
4171 .errstr_unpriv = "R0 leaks addr",
4172 .result = ACCEPT,
4173 .result_unpriv = REJECT,
4174 },
4175 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004176 "map element value or null is marked on register spilling",
4177 .insns = {
4178 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4180 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4181 BPF_LD_MAP_FD(BPF_REG_1, 0),
4182 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4185 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4186 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4187 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4188 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4189 BPF_EXIT_INSN(),
4190 },
4191 .fixup_map2 = { 3 },
4192 .errstr_unpriv = "R0 leaks addr",
4193 .result = ACCEPT,
4194 .result_unpriv = REJECT,
4195 },
4196 {
4197 "map element value store of cleared call register",
4198 .insns = {
4199 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4201 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4202 BPF_LD_MAP_FD(BPF_REG_1, 0),
4203 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4205 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4206 BPF_EXIT_INSN(),
4207 },
4208 .fixup_map2 = { 3 },
4209 .errstr_unpriv = "R1 !read_ok",
4210 .errstr = "R1 !read_ok",
4211 .result = REJECT,
4212 .result_unpriv = REJECT,
4213 },
4214 {
4215 "map element value with unaligned store",
4216 .insns = {
4217 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4219 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4220 BPF_LD_MAP_FD(BPF_REG_1, 0),
4221 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4222 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4224 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4225 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4226 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4227 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4228 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4229 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4230 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4232 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4233 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4234 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4235 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4237 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4238 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4239 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4240 BPF_EXIT_INSN(),
4241 },
4242 .fixup_map2 = { 3 },
4243 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4244 .result = ACCEPT,
4245 .result_unpriv = REJECT,
4246 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4247 },
4248 {
4249 "map element value with unaligned load",
4250 .insns = {
4251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4253 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4254 BPF_LD_MAP_FD(BPF_REG_1, 0),
4255 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4257 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4258 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4260 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4261 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4262 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4263 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4264 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4266 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4267 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4268 BPF_EXIT_INSN(),
4269 },
4270 .fixup_map2 = { 3 },
4271 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4272 .result = ACCEPT,
4273 .result_unpriv = REJECT,
4274 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4275 },
4276 {
4277 "map element value illegal alu op, 1",
4278 .insns = {
4279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4281 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4282 BPF_LD_MAP_FD(BPF_REG_1, 0),
4283 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4284 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4285 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4286 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4287 BPF_EXIT_INSN(),
4288 },
4289 .fixup_map2 = { 3 },
4290 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4291 .errstr = "invalid mem access 'inv'",
4292 .result = REJECT,
4293 .result_unpriv = REJECT,
4294 },
4295 {
4296 "map element value illegal alu op, 2",
4297 .insns = {
4298 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4300 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4301 BPF_LD_MAP_FD(BPF_REG_1, 0),
4302 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4303 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4304 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4305 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4306 BPF_EXIT_INSN(),
4307 },
4308 .fixup_map2 = { 3 },
4309 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4310 .errstr = "invalid mem access 'inv'",
4311 .result = REJECT,
4312 .result_unpriv = REJECT,
4313 },
4314 {
4315 "map element value illegal alu op, 3",
4316 .insns = {
4317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4319 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4320 BPF_LD_MAP_FD(BPF_REG_1, 0),
4321 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4323 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4324 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4325 BPF_EXIT_INSN(),
4326 },
4327 .fixup_map2 = { 3 },
4328 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4329 .errstr = "invalid mem access 'inv'",
4330 .result = REJECT,
4331 .result_unpriv = REJECT,
4332 },
4333 {
4334 "map element value illegal alu op, 4",
4335 .insns = {
4336 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4337 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4338 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4339 BPF_LD_MAP_FD(BPF_REG_1, 0),
4340 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4341 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4342 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4343 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4344 BPF_EXIT_INSN(),
4345 },
4346 .fixup_map2 = { 3 },
4347 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4348 .errstr = "invalid mem access 'inv'",
4349 .result = REJECT,
4350 .result_unpriv = REJECT,
4351 },
4352 {
4353 "map element value illegal alu op, 5",
4354 .insns = {
4355 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4357 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4358 BPF_LD_MAP_FD(BPF_REG_1, 0),
4359 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4360 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4361 BPF_MOV64_IMM(BPF_REG_3, 4096),
4362 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4364 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4365 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4366 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4367 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4368 BPF_EXIT_INSN(),
4369 },
4370 .fixup_map2 = { 3 },
4371 .errstr_unpriv = "R0 invalid mem access 'inv'",
4372 .errstr = "R0 invalid mem access 'inv'",
4373 .result = REJECT,
4374 .result_unpriv = REJECT,
4375 },
4376 {
4377 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08004378 .insns = {
4379 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4381 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4382 BPF_LD_MAP_FD(BPF_REG_1, 0),
4383 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4384 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
4386 offsetof(struct test_val, foo)),
4387 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4388 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4390 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4391 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4392 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4393 BPF_EXIT_INSN(),
4394 },
4395 .fixup_map2 = { 3 },
4396 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4397 .result = ACCEPT,
4398 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004399 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08004400 },
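	/* Helper access to variable-size memory, where the length register
	 * is bounded either by a bitwise AND mask or by conditional jumps
	 * before the helper call.
	 */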
Gianluca Borello06c1c042017-01-09 10:19:49 -08004401 {
4402 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
4403 .insns = {
4404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4406 BPF_MOV64_IMM(BPF_REG_0, 0),
4407 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4408 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4409 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4410 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4411 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4412 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4415 BPF_MOV64_IMM(BPF_REG_2, 16),
4416 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4417 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4418 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4419 BPF_MOV64_IMM(BPF_REG_4, 0),
4420 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4421 BPF_MOV64_IMM(BPF_REG_3, 0),
4422 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4423 BPF_MOV64_IMM(BPF_REG_0, 0),
4424 BPF_EXIT_INSN(),
4425 },
4426 .result = ACCEPT,
4427 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4428 },
4429 {
4430 "helper access to variable memory: stack, bitwise AND, zero included",
4431 .insns = {
4432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4434 BPF_MOV64_IMM(BPF_REG_2, 16),
4435 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4436 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4437 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4438 BPF_MOV64_IMM(BPF_REG_3, 0),
4439 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4440 BPF_EXIT_INSN(),
4441 },
4442 .errstr = "invalid stack type R1 off=-64 access_size=0",
4443 .result = REJECT,
4444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4445 },
4446 {
4447 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4448 .insns = {
4449 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4451 BPF_MOV64_IMM(BPF_REG_2, 16),
4452 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4453 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4454 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4455 BPF_MOV64_IMM(BPF_REG_4, 0),
4456 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4457 BPF_MOV64_IMM(BPF_REG_3, 0),
4458 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4459 BPF_MOV64_IMM(BPF_REG_0, 0),
4460 BPF_EXIT_INSN(),
4461 },
4462 .errstr = "invalid stack type R1 off=-64 access_size=65",
4463 .result = REJECT,
4464 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4465 },
4466 {
4467 "helper access to variable memory: stack, JMP, correct bounds",
4468 .insns = {
4469 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4471 BPF_MOV64_IMM(BPF_REG_0, 0),
4472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4473 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4474 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4475 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4476 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4477 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4478 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4479 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4480 BPF_MOV64_IMM(BPF_REG_2, 16),
4481 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4482 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4483 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4484 BPF_MOV64_IMM(BPF_REG_4, 0),
4485 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4486 BPF_MOV64_IMM(BPF_REG_3, 0),
4487 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4488 BPF_MOV64_IMM(BPF_REG_0, 0),
4489 BPF_EXIT_INSN(),
4490 },
4491 .result = ACCEPT,
4492 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4493 },
4494 {
4495 "helper access to variable memory: stack, JMP (signed), correct bounds",
4496 .insns = {
4497 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4499 BPF_MOV64_IMM(BPF_REG_0, 0),
4500 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4501 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4502 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4503 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4504 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4505 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4506 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4507 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4508 BPF_MOV64_IMM(BPF_REG_2, 16),
4509 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4510 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4511 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4512 BPF_MOV64_IMM(BPF_REG_4, 0),
4513 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4514 BPF_MOV64_IMM(BPF_REG_3, 0),
4515 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4516 BPF_MOV64_IMM(BPF_REG_0, 0),
4517 BPF_EXIT_INSN(),
4518 },
4519 .result = ACCEPT,
4520 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4521 },
4522 {
4523 "helper access to variable memory: stack, JMP, bounds + offset",
4524 .insns = {
4525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4527 BPF_MOV64_IMM(BPF_REG_2, 16),
4528 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4529 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4530 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4531 BPF_MOV64_IMM(BPF_REG_4, 0),
4532 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4534 BPF_MOV64_IMM(BPF_REG_3, 0),
4535 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4536 BPF_MOV64_IMM(BPF_REG_0, 0),
4537 BPF_EXIT_INSN(),
4538 },
4539 .errstr = "invalid stack type R1 off=-64 access_size=65",
4540 .result = REJECT,
4541 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4542 },
4543 {
4544 "helper access to variable memory: stack, JMP, wrong max",
4545 .insns = {
4546 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4548 BPF_MOV64_IMM(BPF_REG_2, 16),
4549 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4550 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4551 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4552 BPF_MOV64_IMM(BPF_REG_4, 0),
4553 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4554 BPF_MOV64_IMM(BPF_REG_3, 0),
4555 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4556 BPF_MOV64_IMM(BPF_REG_0, 0),
4557 BPF_EXIT_INSN(),
4558 },
4559 .errstr = "invalid stack type R1 off=-64 access_size=65",
4560 .result = REJECT,
4561 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4562 },
4563 {
4564 "helper access to variable memory: stack, JMP, no max check",
4565 .insns = {
4566 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4568 BPF_MOV64_IMM(BPF_REG_2, 16),
4569 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4570 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4571 BPF_MOV64_IMM(BPF_REG_4, 0),
4572 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4573 BPF_MOV64_IMM(BPF_REG_3, 0),
4574 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4575 BPF_MOV64_IMM(BPF_REG_0, 0),
4576 BPF_EXIT_INSN(),
4577 },
4578 .errstr = "R2 unbounded memory access",
4579 .result = REJECT,
4580 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4581 },
4582 {
4583 "helper access to variable memory: stack, JMP, no min check",
4584 .insns = {
4585 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4587 BPF_MOV64_IMM(BPF_REG_2, 16),
4588 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4589 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4590 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4591 BPF_MOV64_IMM(BPF_REG_3, 0),
4592 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4593 BPF_MOV64_IMM(BPF_REG_0, 0),
4594 BPF_EXIT_INSN(),
4595 },
4596 .errstr = "invalid stack type R1 off=-64 access_size=0",
4597 .result = REJECT,
4598 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4599 },
4600 {
4601 "helper access to variable memory: stack, JMP (signed), no min check",
4602 .insns = {
4603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4605 BPF_MOV64_IMM(BPF_REG_2, 16),
4606 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4607 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4608 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4609 BPF_MOV64_IMM(BPF_REG_3, 0),
4610 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4611 BPF_MOV64_IMM(BPF_REG_0, 0),
4612 BPF_EXIT_INSN(),
4613 },
4614 .errstr = "R2 min value is negative",
4615 .result = REJECT,
4616 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4617 },
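	/* The stack tests above all follow the same shape: the size passed to
	 * bpf_probe_read() is loaded back from the stack, so the verifier only
	 * knows a range for it, and that range must be bounded on both ends
	 * before the call.  Roughly, in restricted C this corresponds to a
	 * sketch like the following (buf/size/src are illustrative names, not
	 * part of this suite):
	 *
	 *	char buf[64];
	 *	if (size > 64)		// upper bound: stay inside buf
	 *		return 0;
	 *	if (size <= 0)		// lower bound: rule out 0/negative
	 *		return 0;
	 *	bpf_probe_read(buf, size, src);
	 *
	 * Each REJECT variant above drops or breaks one of the two bounds, or
	 * compares with the wrong signedness, and is refused with the
	 * matching errstr.
	 */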
4618 {
4619 "helper access to variable memory: map, JMP, correct bounds",
4620 .insns = {
4621 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4623 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4624 BPF_LD_MAP_FD(BPF_REG_1, 0),
4625 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4626 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4627 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4628 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4629 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4630 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4631 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4632 sizeof(struct test_val), 4),
4633 BPF_MOV64_IMM(BPF_REG_4, 0),
4634 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4635 BPF_MOV64_IMM(BPF_REG_3, 0),
4636 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4637 BPF_MOV64_IMM(BPF_REG_0, 0),
4638 BPF_EXIT_INSN(),
4639 },
4640 .fixup_map2 = { 3 },
4641 .result = ACCEPT,
4642 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4643 },
4644 {
4645 "helper access to variable memory: map, JMP, wrong max",
4646 .insns = {
4647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4649 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4650 BPF_LD_MAP_FD(BPF_REG_1, 0),
4651 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4653 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4654 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4655 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4656 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4657 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4658 sizeof(struct test_val) + 1, 4),
4659 BPF_MOV64_IMM(BPF_REG_4, 0),
4660 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4661 BPF_MOV64_IMM(BPF_REG_3, 0),
4662 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4663 BPF_MOV64_IMM(BPF_REG_0, 0),
4664 BPF_EXIT_INSN(),
4665 },
4666 .fixup_map2 = { 3 },
4667 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4668 .result = REJECT,
4669 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4670 },
4671 {
4672 "helper access to variable memory: map adjusted, JMP, correct bounds",
4673 .insns = {
4674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4676 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4677 BPF_LD_MAP_FD(BPF_REG_1, 0),
4678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4682 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4683 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4684 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4685 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4686 sizeof(struct test_val) - 20, 4),
4687 BPF_MOV64_IMM(BPF_REG_4, 0),
4688 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4689 BPF_MOV64_IMM(BPF_REG_3, 0),
4690 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4691 BPF_MOV64_IMM(BPF_REG_0, 0),
4692 BPF_EXIT_INSN(),
4693 },
4694 .fixup_map2 = { 3 },
4695 .result = ACCEPT,
4696 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4697 },
4698 {
4699 "helper access to variable memory: map adjusted, JMP, wrong max",
4700 .insns = {
4701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4703 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4704 BPF_LD_MAP_FD(BPF_REG_1, 0),
4705 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4706 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4707 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4709 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4710 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4711 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4712 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4713 sizeof(struct test_val) - 19, 4),
4714 BPF_MOV64_IMM(BPF_REG_4, 0),
4715 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4716 BPF_MOV64_IMM(BPF_REG_3, 0),
4717 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4718 BPF_MOV64_IMM(BPF_REG_0, 0),
4719 BPF_EXIT_INSN(),
4720 },
4721 .fixup_map2 = { 3 },
4722 .errstr = "R1 min value is outside of the array range",
4723 .result = REJECT,
4724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4725 },
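	/* Same bounding pattern, but with a map value as the helper's memory
	 * argument.  struct test_val is 48 bytes in this suite (hence the
	 * "value_size=48" errstr above), so the largest acceptable size is 48
	 * when R1 points at the start of the value, and only 48 - 20 = 28
	 * once the "adjusted" variants have moved R1 forward by 20 bytes.
	 * Allowing even one byte more (49, or 29 in the adjusted case) turns
	 * the test into a REJECT.
	 */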
4726 {
4727 "helper access to variable memory: size > 0 not allowed on NULL",
4728 .insns = {
4729 BPF_MOV64_IMM(BPF_REG_1, 0),
4730 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004731 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4732 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004733 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4734 BPF_MOV64_IMM(BPF_REG_3, 0),
4735 BPF_MOV64_IMM(BPF_REG_4, 0),
4736 BPF_MOV64_IMM(BPF_REG_5, 0),
4737 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4738 BPF_EXIT_INSN(),
4739 },
4740 .errstr = "R1 type=imm expected=fp",
4741 .result = REJECT,
4742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4743 },
4744 {
4745 "helper access to variable memory: size = 0 not allowed on != NULL",
4746 .insns = {
4747 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4749 BPF_MOV64_IMM(BPF_REG_2, 0),
4750 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4751 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4752 BPF_MOV64_IMM(BPF_REG_3, 0),
4753 BPF_MOV64_IMM(BPF_REG_4, 0),
4754 BPF_MOV64_IMM(BPF_REG_5, 0),
4755 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4756 BPF_EXIT_INSN(),
4757 },
4758 .errstr = "invalid stack type R1 off=-8 access_size=0",
4759 .result = REJECT,
4760 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4761 },
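	/* The two bpf_csum_diff() tests poke at the NULL/zero-size corner
	 * cases; the helper takes (from, from_size, to, to_size, seed) in
	 * R1-R5.  At the time these tests were written the verifier only
	 * tolerated a NULL memory pointer together with a size that is
	 * provably zero, and conversely refused a real stack pointer whose
	 * size is known to be zero - hence "R1 type=imm expected=fp" for the
	 * first case and "access_size=0" for the second.
	 */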
4762 {
4763 "helper access to variable memory: 8 bytes leak",
4764 .insns = {
4765 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4767 BPF_MOV64_IMM(BPF_REG_0, 0),
4768 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4769 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4770 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4771 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4772 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4773 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4774 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4775 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004776 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4777 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004778 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4780 BPF_MOV64_IMM(BPF_REG_3, 0),
4781 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4782 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4783 BPF_EXIT_INSN(),
4784 },
4785 .errstr = "invalid indirect read from stack off -64+32 size 64",
4786 .result = REJECT,
4787 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4788 },
4789 {
4790 "helper access to variable memory: 8 bytes no leak (init memory)",
4791 .insns = {
4792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4793 BPF_MOV64_IMM(BPF_REG_0, 0),
4794 BPF_MOV64_IMM(BPF_REG_0, 0),
4795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4798 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4801 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4802 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4804 BPF_MOV64_IMM(BPF_REG_2, 0),
4805 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4807 BPF_MOV64_IMM(BPF_REG_3, 0),
4808 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4809 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4810 BPF_EXIT_INSN(),
4811 },
4812 .result = ACCEPT,
4813 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4814 },
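	/* The two "leak" tests show that stack memory handed to a helper with
	 * a variable size must be initialized over the whole range the helper
	 * may touch.  The REJECT variant stores to every 8-byte slot of the
	 * 64-byte buffer except fp-32, and the verifier points exactly at
	 * that hole ("off -64+32 size 64"); the ACCEPT variant fills all
	 * eight slots and bounds the size, so no stale stack contents can
	 * escape through the helper.
	 */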
Josef Bacik29200c12017-02-03 16:25:23 -05004815 {
4816 "invalid and of negative number",
4817 .insns = {
4818 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4819 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4821 BPF_LD_MAP_FD(BPF_REG_1, 0),
4822 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4823 BPF_FUNC_map_lookup_elem),
4824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4825 BPF_MOV64_IMM(BPF_REG_1, 6),
4826 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4827 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4828 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4829 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4830 offsetof(struct test_val, foo)),
4831 BPF_EXIT_INSN(),
4832 },
4833 .fixup_map2 = { 3 },
4834 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4835 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4836 .result = REJECT,
4837 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004838 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05004839 },
4840 {
4841 "invalid range check",
4842 .insns = {
4843 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4846 BPF_LD_MAP_FD(BPF_REG_1, 0),
4847 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4848 BPF_FUNC_map_lookup_elem),
4849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4850 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4851 BPF_MOV64_IMM(BPF_REG_9, 1),
4852 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4853 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4854 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4855 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4856 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4857 BPF_MOV32_IMM(BPF_REG_3, 1),
4858 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4859 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4860 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4861 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4862 BPF_MOV64_REG(BPF_REG_0, 0),
4863 BPF_EXIT_INSN(),
4864 },
4865 .fixup_map2 = { 3 },
4866 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4867 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4868 .result = REJECT,
4869 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004870 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004871 },
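	/* Both tests above do arithmetic on a map value pointer with an
	 * offset whose minimum the verifier cannot prove to be >= 0: for
	 * example 6 & -4 is 4 at run time, but tracking an AND with a
	 * negative constant leaves the register's min value negative, so the
	 * later R0 += R1 is refused (unprivileged users are stopped even
	 * earlier by the pointer-arithmetic check).  The
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag tells the harness that on
	 * strict-alignment targets these programs may instead be rejected
	 * with "Unknown alignment.", which do_test_single() counts as a pass.
	 */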
4872 {
4873 "map in map access",
4874 .insns = {
4875 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4876 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4878 BPF_LD_MAP_FD(BPF_REG_1, 0),
4879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4880 BPF_FUNC_map_lookup_elem),
4881 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4882 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4883 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4887 BPF_FUNC_map_lookup_elem),
4888 BPF_MOV64_REG(BPF_REG_0, 0),
4889 BPF_EXIT_INSN(),
4890 },
4891 .fixup_map_in_map = { 3 },
4892 .result = ACCEPT,
4893 },
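	/* The map-in-map tests correspond roughly to the following
	 * restricted-C pattern (sketch only, names are illustrative):
	 *
	 *	void *inner = bpf_map_lookup_elem(&outer_map, &key);
	 *	if (!inner)
	 *		return 0;
	 *	value = bpf_map_lookup_elem(inner, &key);
	 *
	 * The two REJECT variants below show what happens when the inner map
	 * pointer is offset before use ("R1 type=inv expected=map_ptr") or
	 * when the NULL check on the outer lookup is skipped
	 * ("R1 type=map_value_or_null expected=map_ptr").
	 */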
4894 {
4895 "invalid inner map pointer",
4896 .insns = {
4897 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4900 BPF_LD_MAP_FD(BPF_REG_1, 0),
4901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4902 BPF_FUNC_map_lookup_elem),
4903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4904 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4905 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4910 BPF_FUNC_map_lookup_elem),
4911 BPF_MOV64_REG(BPF_REG_0, 0),
4912 BPF_EXIT_INSN(),
4913 },
4914 .fixup_map_in_map = { 3 },
4915 .errstr = "R1 type=inv expected=map_ptr",
4916 .errstr_unpriv = "R1 pointer arithmetic prohibited",
4917 .result = REJECT,
4918 },
4919 {
4920 "forgot null checking on the inner map pointer",
4921 .insns = {
4922 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4923 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4925 BPF_LD_MAP_FD(BPF_REG_1, 0),
4926 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4927 BPF_FUNC_map_lookup_elem),
4928 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4929 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4931 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4933 BPF_FUNC_map_lookup_elem),
4934 BPF_MOV64_REG(BPF_REG_0, 0),
4935 BPF_EXIT_INSN(),
4936 },
4937 .fixup_map_in_map = { 3 },
4938 .errstr = "R1 type=map_value_or_null expected=map_ptr",
4939 .result = REJECT,
Josef Bacik29200c12017-02-03 16:25:23 -05004940 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004941};
4942
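/* The insns[] templates carry no explicit length; the tail of each fixed-size
 * array is implicitly zero-filled, so scanning backwards for the last non-zero
 * instruction recovers the program length.
 */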
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004943static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004944{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004945 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004946
4947 for (len = MAX_INSNS - 1; len > 0; --len)
4948 if (fp[len].code != 0 || fp[len].imm != 0)
4949 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004950 return len + 1;
4951}
4952
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004953static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004954{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004955 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004956
Mickaël Salaünf4874d02017-02-10 00:21:43 +01004957 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004958 size_value, max_elem, BPF_F_NO_PREALLOC);
4959 if (fd < 0)
4960 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004961
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004962 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004963}
4964
4965static int create_prog_array(void)
4966{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004967 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004968
Mickaël Salaünf4874d02017-02-10 00:21:43 +01004969 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004970 sizeof(int), 4, 0);
4971 if (fd < 0)
4972 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004973
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004974 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004975}
4976
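/* The inner array fd only serves as a template that fixes the inner map's
 * type and sizes for the outer BPF_MAP_TYPE_ARRAY_OF_MAPS, which is why it
 * can be closed right away.  These tests never populate the outer map, but
 * at run time that would look something like the sketch below (illustrative
 * only):
 *
 *	int key = 0;
 *
 *	bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, 0);
 */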
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004977static int create_map_in_map(void)
4978{
4979 int inner_map_fd, outer_map_fd;
4980
4981 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
4982 sizeof(int), 1, 0);
4983 if (inner_map_fd < 0) {
4984 printf("Failed to create array '%s'!\n", strerror(errno));
4985 return inner_map_fd;
4986 }
4987
4988 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
4989 sizeof(int), inner_map_fd, 1, 0);
4990 if (outer_map_fd < 0)
4991 printf("Failed to create array of maps '%s'!\n",
4992 strerror(errno));
4993
4994 close(inner_map_fd);
4995
4996 return outer_map_fd;
4997}
4998
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004999static char bpf_vlog[32768];
5000
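/* Each fixup_* array in struct bpf_test holds instruction indices of
 * BPF_LD_MAP_FD(reg, 0) placeholders; the matching map is created on demand
 * and its fd is patched into the imm field of that (two-slot) ld_imm64
 * instruction before loading.  For example, in the tests above
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, 0),	(instruction index 3)
 *	...
 *	.fixup_map2 = { 3 },
 *
 * makes instruction 3 receive the fd of a freshly created hash map whose
 * value is a struct test_val.  The arrays are zero-terminated, so an index
 * of 0 cannot be expressed with this scheme.
 */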
5001static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005002 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005003{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005004 int *fixup_map1 = test->fixup_map1;
5005 int *fixup_map2 = test->fixup_map2;
5006 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005007 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005008
5009 /* Allocating HTs with 1 elem is fine here, since we only test
5010	 * the verifier and do not perform a runtime lookup, so the only
5011	 * thing that really matters is the value size in this case.
5012 */
5013 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005014 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005015 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005016 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005017 fixup_map1++;
5018 } while (*fixup_map1);
5019 }
5020
5021 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005022 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005023 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005024 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005025 fixup_map2++;
5026 } while (*fixup_map2);
5027 }
5028
5029 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005030 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005031 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005032 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005033 fixup_prog++;
5034 } while (*fixup_prog);
5035 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005036
5037 if (*fixup_map_in_map) {
5038 map_fds[3] = create_map_in_map();
5039 do {
5040 prog[*fixup_map_in_map].imm = map_fds[3];
5041 fixup_map_in_map++;
5042 } while (*fixup_map_in_map);
5043 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005044}
5045
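/* do_test_single() loads one template with either the privileged or the
 * unprivileged expectations (result/errstr vs. result_unpriv/errstr_unpriv).
 * Tests flagged with F_NEEDS_EFFICIENT_UNALIGNED_ACCESS get one extra escape
 * hatch: on targets without efficient unaligned access, a rejection whose
 * log contains "Unknown alignment." is still counted as a pass (with a
 * note), while on targets that do have efficient unaligned access such a
 * rejection is reported as a failure.
 */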
5046static void do_test_single(struct bpf_test *test, bool unpriv,
5047 int *passes, int *errors)
5048{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005049 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005050 struct bpf_insn *prog = test->insns;
5051 int prog_len = probe_filter_length(prog);
5052 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005053 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005054 const char *expected_err;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005055 int i;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005056
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005057 for (i = 0; i < MAX_NR_MAPS; i++)
5058 map_fds[i] = -1;
5059
5060 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005061
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +01005062 fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
5063 prog, prog_len, "GPL", 0, bpf_vlog,
5064 sizeof(bpf_vlog));
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005065
5066 expected_ret = unpriv && test->result_unpriv != UNDEF ?
5067 test->result_unpriv : test->result;
5068 expected_err = unpriv && test->errstr_unpriv ?
5069 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005070
5071 reject_from_alignment = fd_prog < 0 &&
5072 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
5073 strstr(bpf_vlog, "Unknown alignment.");
5074#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
5075 if (reject_from_alignment) {
5076 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
5077 strerror(errno));
5078 goto fail_log;
5079 }
5080#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005081 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005082 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005083 printf("FAIL\nFailed to load prog '%s'!\n",
5084 strerror(errno));
5085 goto fail_log;
5086 }
5087 } else {
5088 if (fd_prog >= 0) {
5089 printf("FAIL\nUnexpectedly succeeded to load!\n");
5090 goto fail_log;
5091 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005092 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005093 printf("FAIL\nUnexpected error message!\n");
5094 goto fail_log;
5095 }
5096 }
5097
5098 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005099 printf("OK%s\n", reject_from_alignment ?
5100 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005101close_fds:
5102 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005103 for (i = 0; i < MAX_NR_MAPS; i++)
5104 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005105 sched_yield();
5106 return;
5107fail_log:
5108 (*errors)++;
5109 printf("%s", bpf_vlog);
5110 goto close_fds;
5111}
5112
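/* is_admin()/set_admin() let a root invocation also exercise the
 * unprivileged verifier paths: do_test() below temporarily drops
 * CAP_SYS_ADMIN from the effective set, loads the program with the
 * unprivileged expectations, and then restores the capability:
 *
 *	set_admin(false);
 *	do_test_single(test, true, &passes, &errors);
 *	set_admin(true);
 */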
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005113static bool is_admin(void)
5114{
5115 cap_t caps;
5116 cap_flag_value_t sysadmin = CAP_CLEAR;
5117 const cap_value_t cap_val = CAP_SYS_ADMIN;
5118
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08005119#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005120 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
5121 perror("cap_get_flag");
5122 return false;
5123 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08005124#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005125 caps = cap_get_proc();
5126 if (!caps) {
5127 perror("cap_get_proc");
5128 return false;
5129 }
5130 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
5131 perror("cap_get_flag");
5132 if (cap_free(caps))
5133 perror("cap_free");
5134 return (sysadmin == CAP_SET);
5135}
5136
5137static int set_admin(bool admin)
5138{
5139 cap_t caps;
5140 const cap_value_t cap_val = CAP_SYS_ADMIN;
5141 int ret = -1;
5142
5143 caps = cap_get_proc();
5144 if (!caps) {
5145 perror("cap_get_proc");
5146 return -1;
5147 }
5148 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
5149 admin ? CAP_SET : CAP_CLEAR)) {
5150 perror("cap_set_flag");
5151 goto out;
5152 }
5153 if (cap_set_proc(caps)) {
5154 perror("cap_set_proc");
5155 goto out;
5156 }
5157 ret = 0;
5158out:
5159 if (cap_free(caps))
5160 perror("cap_free");
5161 return ret;
5162}
5163
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005164static int do_test(bool unpriv, unsigned int from, unsigned int to)
5165{
5166 int i, passes = 0, errors = 0;
5167
5168 for (i = from; i < to; i++) {
5169 struct bpf_test *test = &tests[i];
5170
5171 /* Program types that are not supported by non-root
5172 * are skipped right away.
5173 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005174 if (!test->prog_type) {
5175 if (!unpriv)
5176 set_admin(false);
5177 printf("#%d/u %s ", i, test->descr);
5178 do_test_single(test, true, &passes, &errors);
5179 if (!unpriv)
5180 set_admin(true);
5181 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005182
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005183 if (!unpriv) {
5184 printf("#%d/p %s ", i, test->descr);
5185 do_test_single(test, false, &passes, &errors);
5186 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005187 }
5188
5189 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
5190 return errors ? -errors : 0;
5191}
5192
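/* Typical invocations (assuming the usual test_verifier binary name):
 *
 *	./test_verifier		run the whole suite
 *	./test_verifier 47	run only test #47
 *	./test_verifier 10 20	run tests #10 through #20, inclusive
 *
 * RLIMIT_MEMLOCK is raised first (to infinity when running as root, to
 * 1 MiB otherwise) so that map creation does not fail spuriously.
 */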
5193int main(int argc, char **argv)
5194{
5195 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
5196 struct rlimit rlim = { 1 << 20, 1 << 20 };
5197 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005198 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005199
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005200 if (argc == 3) {
5201 unsigned int l = atoi(argv[argc - 2]);
5202 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005203
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005204 if (l < to && u < to) {
5205 from = l;
5206 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005207 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005208 } else if (argc == 2) {
5209 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005210
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005211 if (t < to) {
5212 from = t;
5213 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005214 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005215 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005216
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005217 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
5218 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005219}