/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011#include <stdio.h>
12#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070015#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070016#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020017#include <sched.h>
18
Mickaël Salaünd02d8982017-02-10 00:21:37 +010019#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070020#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070021
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020022#include <linux/unistd.h>
23#include <linux/filter.h>
24#include <linux/bpf_perf_event.h>
25#include <linux/bpf.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070026
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020027#include "../../../include/linux/filter.h"
28
29#include "bpf_sys.h"
30
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

/* Per-test-case limits: maximum number of BPF instructions in one
 * program, and maximum number of fixup slots in each fixup array.
 */
#define MAX_INSNS 512
#define MAX_FIXUPS 8
Alexei Starovoitovbf508872015-10-07 22:23:23 -070037
/* One eBPF verifier test case: an instruction stream plus the expected
 * verifier verdict, recorded separately for privileged and unprivileged
 * program loads.
 */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program fed to the verifier */
	/* Instruction indices (into insns[]) of BPF_LD_MAP_FD insns whose
	 * map fd gets patched in before loading.  map1 vs map2 presumably
	 * select different map layouts — patch logic lives in the test
	 * runner, not visible in this chunk.
	 */
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];		/* same, for a prog-array map fd */
	const char *errstr;			/* expected verifier error text on REJECT */
	const char *errstr_unpriv;		/* expected error text for unprivileged load */
	enum {
		UNDEF,				/* no separate expectation recorded */
		ACCEPT,
		REJECT
	} result, result_unpriv;		/* verdict: privileged / unprivileged */
	enum bpf_prog_type prog_type;		/* program type used when loading (0 = default) */
};
53
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Map value layout used by tests that index into a map value: a bounds
 * index followed by a fixed array, so out-of-range foo[] accesses land
 * exactly past the end of the structure.
 */
struct test_val {
	unsigned int index;			/* index the program uses to pick a foo[] slot */
	int foo[MAX_ENTRIES];			/* payload; array end coincides with struct end */
};
63
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070064static struct bpf_test tests[] = {
65 {
66 "add+sub+mul",
67 .insns = {
68 BPF_MOV64_IMM(BPF_REG_1, 1),
69 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
70 BPF_MOV64_IMM(BPF_REG_2, 3),
71 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
72 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
73 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
74 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
75 BPF_EXIT_INSN(),
76 },
77 .result = ACCEPT,
78 },
79 {
80 "unreachable",
81 .insns = {
82 BPF_EXIT_INSN(),
83 BPF_EXIT_INSN(),
84 },
85 .errstr = "unreachable",
86 .result = REJECT,
87 },
88 {
89 "unreachable2",
90 .insns = {
91 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
92 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
93 BPF_EXIT_INSN(),
94 },
95 .errstr = "unreachable",
96 .result = REJECT,
97 },
98 {
99 "out of range jump",
100 .insns = {
101 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
102 BPF_EXIT_INSN(),
103 },
104 .errstr = "jump out of range",
105 .result = REJECT,
106 },
107 {
108 "out of range jump2",
109 .insns = {
110 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
111 BPF_EXIT_INSN(),
112 },
113 .errstr = "jump out of range",
114 .result = REJECT,
115 },
116 {
117 "test1 ld_imm64",
118 .insns = {
119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
120 BPF_LD_IMM64(BPF_REG_0, 0),
121 BPF_LD_IMM64(BPF_REG_0, 0),
122 BPF_LD_IMM64(BPF_REG_0, 1),
123 BPF_LD_IMM64(BPF_REG_0, 1),
124 BPF_MOV64_IMM(BPF_REG_0, 2),
125 BPF_EXIT_INSN(),
126 },
127 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700128 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700129 .result = REJECT,
130 },
131 {
132 "test2 ld_imm64",
133 .insns = {
134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
135 BPF_LD_IMM64(BPF_REG_0, 0),
136 BPF_LD_IMM64(BPF_REG_0, 0),
137 BPF_LD_IMM64(BPF_REG_0, 1),
138 BPF_LD_IMM64(BPF_REG_0, 1),
139 BPF_EXIT_INSN(),
140 },
141 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700142 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700143 .result = REJECT,
144 },
145 {
146 "test3 ld_imm64",
147 .insns = {
148 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
149 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
150 BPF_LD_IMM64(BPF_REG_0, 0),
151 BPF_LD_IMM64(BPF_REG_0, 0),
152 BPF_LD_IMM64(BPF_REG_0, 1),
153 BPF_LD_IMM64(BPF_REG_0, 1),
154 BPF_EXIT_INSN(),
155 },
156 .errstr = "invalid bpf_ld_imm64 insn",
157 .result = REJECT,
158 },
159 {
160 "test4 ld_imm64",
161 .insns = {
162 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
163 BPF_EXIT_INSN(),
164 },
165 .errstr = "invalid bpf_ld_imm64 insn",
166 .result = REJECT,
167 },
168 {
169 "test5 ld_imm64",
170 .insns = {
171 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
172 },
173 .errstr = "invalid bpf_ld_imm64 insn",
174 .result = REJECT,
175 },
176 {
177 "no bpf_exit",
178 .insns = {
179 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
180 },
181 .errstr = "jump out of range",
182 .result = REJECT,
183 },
184 {
185 "loop (back-edge)",
186 .insns = {
187 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
188 BPF_EXIT_INSN(),
189 },
190 .errstr = "back-edge",
191 .result = REJECT,
192 },
193 {
194 "loop2 (back-edge)",
195 .insns = {
196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
197 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
198 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
199 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
200 BPF_EXIT_INSN(),
201 },
202 .errstr = "back-edge",
203 .result = REJECT,
204 },
205 {
206 "conditional loop",
207 .insns = {
208 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
210 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
211 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
212 BPF_EXIT_INSN(),
213 },
214 .errstr = "back-edge",
215 .result = REJECT,
216 },
217 {
218 "read uninitialized register",
219 .insns = {
220 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
221 BPF_EXIT_INSN(),
222 },
223 .errstr = "R2 !read_ok",
224 .result = REJECT,
225 },
226 {
227 "read invalid register",
228 .insns = {
229 BPF_MOV64_REG(BPF_REG_0, -1),
230 BPF_EXIT_INSN(),
231 },
232 .errstr = "R15 is invalid",
233 .result = REJECT,
234 },
235 {
236 "program doesn't init R0 before exit",
237 .insns = {
238 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
239 BPF_EXIT_INSN(),
240 },
241 .errstr = "R0 !read_ok",
242 .result = REJECT,
243 },
244 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700245 "program doesn't init R0 before exit in all branches",
246 .insns = {
247 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
248 BPF_MOV64_IMM(BPF_REG_0, 1),
249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
250 BPF_EXIT_INSN(),
251 },
252 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700253 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700254 .result = REJECT,
255 },
256 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700257 "stack out of bounds",
258 .insns = {
259 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
260 BPF_EXIT_INSN(),
261 },
262 .errstr = "invalid stack",
263 .result = REJECT,
264 },
265 {
266 "invalid call insn1",
267 .insns = {
268 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
269 BPF_EXIT_INSN(),
270 },
271 .errstr = "BPF_CALL uses reserved",
272 .result = REJECT,
273 },
274 {
275 "invalid call insn2",
276 .insns = {
277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
278 BPF_EXIT_INSN(),
279 },
280 .errstr = "BPF_CALL uses reserved",
281 .result = REJECT,
282 },
283 {
284 "invalid function call",
285 .insns = {
286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
287 BPF_EXIT_INSN(),
288 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100289 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700290 .result = REJECT,
291 },
292 {
293 "uninitialized stack1",
294 .insns = {
295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
297 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200298 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
299 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700300 BPF_EXIT_INSN(),
301 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200302 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700303 .errstr = "invalid indirect read from stack",
304 .result = REJECT,
305 },
306 {
307 "uninitialized stack2",
308 .insns = {
309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
310 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
311 BPF_EXIT_INSN(),
312 },
313 .errstr = "invalid read from stack",
314 .result = REJECT,
315 },
316 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200317 "invalid argument register",
318 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
320 BPF_FUNC_get_cgroup_classid),
321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
322 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200323 BPF_EXIT_INSN(),
324 },
325 .errstr = "R1 !read_ok",
326 .result = REJECT,
327 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
328 },
329 {
330 "non-invalid argument register",
331 .insns = {
332 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
334 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200335 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
337 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200338 BPF_EXIT_INSN(),
339 },
340 .result = ACCEPT,
341 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
342 },
343 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700344 "check valid spill/fill",
345 .insns = {
346 /* spill R1(ctx) into stack */
347 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700348 /* fill it back into R2 */
349 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700350 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100351 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
352 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700353 BPF_EXIT_INSN(),
354 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700355 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700356 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700357 .result_unpriv = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700358 },
359 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200360 "check valid spill/fill, skb mark",
361 .insns = {
362 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
363 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
364 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
365 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
366 offsetof(struct __sk_buff, mark)),
367 BPF_EXIT_INSN(),
368 },
369 .result = ACCEPT,
370 .result_unpriv = ACCEPT,
371 },
372 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700373 "check corrupted spill/fill",
374 .insns = {
375 /* spill R1(ctx) into stack */
376 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700377 /* mess up with R1 pointer on stack */
378 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700379 /* fill back into R0 should fail */
380 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700381 BPF_EXIT_INSN(),
382 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700383 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700384 .errstr = "corrupted spill",
385 .result = REJECT,
386 },
387 {
388 "invalid src register in STX",
389 .insns = {
390 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
391 BPF_EXIT_INSN(),
392 },
393 .errstr = "R15 is invalid",
394 .result = REJECT,
395 },
396 {
397 "invalid dst register in STX",
398 .insns = {
399 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
400 BPF_EXIT_INSN(),
401 },
402 .errstr = "R14 is invalid",
403 .result = REJECT,
404 },
405 {
406 "invalid dst register in ST",
407 .insns = {
408 BPF_ST_MEM(BPF_B, 14, -1, -1),
409 BPF_EXIT_INSN(),
410 },
411 .errstr = "R14 is invalid",
412 .result = REJECT,
413 },
414 {
415 "invalid src register in LDX",
416 .insns = {
417 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
418 BPF_EXIT_INSN(),
419 },
420 .errstr = "R12 is invalid",
421 .result = REJECT,
422 },
423 {
424 "invalid dst register in LDX",
425 .insns = {
426 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
427 BPF_EXIT_INSN(),
428 },
429 .errstr = "R11 is invalid",
430 .result = REJECT,
431 },
432 {
433 "junk insn",
434 .insns = {
435 BPF_RAW_INSN(0, 0, 0, 0, 0),
436 BPF_EXIT_INSN(),
437 },
438 .errstr = "invalid BPF_LD_IMM",
439 .result = REJECT,
440 },
441 {
442 "junk insn2",
443 .insns = {
444 BPF_RAW_INSN(1, 0, 0, 0, 0),
445 BPF_EXIT_INSN(),
446 },
447 .errstr = "BPF_LDX uses reserved fields",
448 .result = REJECT,
449 },
450 {
451 "junk insn3",
452 .insns = {
453 BPF_RAW_INSN(-1, 0, 0, 0, 0),
454 BPF_EXIT_INSN(),
455 },
456 .errstr = "invalid BPF_ALU opcode f0",
457 .result = REJECT,
458 },
459 {
460 "junk insn4",
461 .insns = {
462 BPF_RAW_INSN(-1, -1, -1, -1, -1),
463 BPF_EXIT_INSN(),
464 },
465 .errstr = "invalid BPF_ALU opcode f0",
466 .result = REJECT,
467 },
468 {
469 "junk insn5",
470 .insns = {
471 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
472 BPF_EXIT_INSN(),
473 },
474 .errstr = "BPF_ALU uses reserved fields",
475 .result = REJECT,
476 },
477 {
478 "misaligned read from stack",
479 .insns = {
480 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
481 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
482 BPF_EXIT_INSN(),
483 },
484 .errstr = "misaligned access",
485 .result = REJECT,
486 },
487 {
488 "invalid map_fd for function call",
489 .insns = {
490 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
491 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
493 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
495 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700496 BPF_EXIT_INSN(),
497 },
498 .errstr = "fd 0 is not pointing to valid bpf_map",
499 .result = REJECT,
500 },
501 {
502 "don't check return value before access",
503 .insns = {
504 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
505 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
507 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
509 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700510 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
511 BPF_EXIT_INSN(),
512 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200513 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700514 .errstr = "R0 invalid mem access 'map_value_or_null'",
515 .result = REJECT,
516 },
517 {
518 "access memory with incorrect alignment",
519 .insns = {
520 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
521 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
523 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
525 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700526 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
527 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
528 BPF_EXIT_INSN(),
529 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200530 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700531 .errstr = "misaligned access",
532 .result = REJECT,
533 },
534 {
535 "sometimes access memory with incorrect alignment",
536 .insns = {
537 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
540 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
542 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700543 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
544 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
545 BPF_EXIT_INSN(),
546 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
547 BPF_EXIT_INSN(),
548 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200549 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700550 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700551 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700552 .result = REJECT,
553 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700554 {
555 "jump test 1",
556 .insns = {
557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
558 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
560 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
562 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
563 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
564 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
565 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
566 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
567 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
568 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
569 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
570 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
571 BPF_MOV64_IMM(BPF_REG_0, 0),
572 BPF_EXIT_INSN(),
573 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700574 .errstr_unpriv = "R1 pointer comparison",
575 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700576 .result = ACCEPT,
577 },
578 {
579 "jump test 2",
580 .insns = {
581 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
583 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
584 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
586 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
587 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
589 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
590 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
591 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
592 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
593 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
595 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
596 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
598 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
599 BPF_MOV64_IMM(BPF_REG_0, 0),
600 BPF_EXIT_INSN(),
601 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700602 .errstr_unpriv = "R1 pointer comparison",
603 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700604 .result = ACCEPT,
605 },
606 {
607 "jump test 3",
608 .insns = {
609 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
611 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
613 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
615 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
617 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
619 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
621 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
623 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
625 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
626 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
627 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
629 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
631 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
633 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
635 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700636 BPF_EXIT_INSN(),
637 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200638 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700639 .errstr_unpriv = "R1 pointer comparison",
640 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700641 .result = ACCEPT,
642 },
643 {
644 "jump test 4",
645 .insns = {
646 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
651 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
653 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
654 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
656 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
658 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
659 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
672 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
674 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
686 BPF_MOV64_IMM(BPF_REG_0, 0),
687 BPF_EXIT_INSN(),
688 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700689 .errstr_unpriv = "R1 pointer comparison",
690 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700691 .result = ACCEPT,
692 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700693 {
694 "jump test 5",
695 .insns = {
696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
697 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
698 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
699 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
700 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
701 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
702 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
703 BPF_MOV64_IMM(BPF_REG_0, 0),
704 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
705 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
706 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
707 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
708 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
709 BPF_MOV64_IMM(BPF_REG_0, 0),
710 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
711 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
712 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
713 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
714 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
715 BPF_MOV64_IMM(BPF_REG_0, 0),
716 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
717 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
718 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
719 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
720 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
721 BPF_MOV64_IMM(BPF_REG_0, 0),
722 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
723 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
724 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
725 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
726 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
727 BPF_MOV64_IMM(BPF_REG_0, 0),
728 BPF_EXIT_INSN(),
729 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700730 .errstr_unpriv = "R1 pointer comparison",
731 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700732 .result = ACCEPT,
733 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700734 {
735 "access skb fields ok",
736 .insns = {
737 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
738 offsetof(struct __sk_buff, len)),
739 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
741 offsetof(struct __sk_buff, mark)),
742 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
743 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
744 offsetof(struct __sk_buff, pkt_type)),
745 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
746 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
747 offsetof(struct __sk_buff, queue_mapping)),
748 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -0700749 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
750 offsetof(struct __sk_buff, protocol)),
751 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
752 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
753 offsetof(struct __sk_buff, vlan_present)),
754 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
755 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
756 offsetof(struct __sk_buff, vlan_tci)),
757 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700758 BPF_EXIT_INSN(),
759 },
760 .result = ACCEPT,
761 },
762 {
763 "access skb fields bad1",
764 .insns = {
765 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
766 BPF_EXIT_INSN(),
767 },
768 .errstr = "invalid bpf_context access",
769 .result = REJECT,
770 },
771 {
772 "access skb fields bad2",
773 .insns = {
774 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
775 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
776 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
778 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200779 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
780 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700781 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
782 BPF_EXIT_INSN(),
783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
785 offsetof(struct __sk_buff, pkt_type)),
786 BPF_EXIT_INSN(),
787 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200788 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700789 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700790 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700791 .result = REJECT,
792 },
793 {
794 "access skb fields bad3",
795 .insns = {
796 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
797 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
798 offsetof(struct __sk_buff, pkt_type)),
799 BPF_EXIT_INSN(),
800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
803 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
805 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700806 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
807 BPF_EXIT_INSN(),
808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
809 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
810 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200811 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700812 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700813 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700814 .result = REJECT,
815 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700816 {
817 "access skb fields bad4",
818 .insns = {
819 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
820 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
821 offsetof(struct __sk_buff, len)),
822 BPF_MOV64_IMM(BPF_REG_0, 0),
823 BPF_EXIT_INSN(),
824 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
827 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
829 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700830 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
831 BPF_EXIT_INSN(),
832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
833 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
834 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200835 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700836 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700837 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700838 .result = REJECT,
839 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700840 {
841 "check skb->mark is not writeable by sockets",
842 .insns = {
843 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
844 offsetof(struct __sk_buff, mark)),
845 BPF_EXIT_INSN(),
846 },
847 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700848 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700849 .result = REJECT,
850 },
851 {
852 "check skb->tc_index is not writeable by sockets",
853 .insns = {
854 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
855 offsetof(struct __sk_buff, tc_index)),
856 BPF_EXIT_INSN(),
857 },
858 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700859 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700860 .result = REJECT,
861 },
862 {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100863 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700864 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100865 BPF_MOV64_IMM(BPF_REG_0, 0),
866 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
867 offsetof(struct __sk_buff, cb[0])),
868 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
869 offsetof(struct __sk_buff, cb[0]) + 1),
870 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
871 offsetof(struct __sk_buff, cb[0]) + 2),
872 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
873 offsetof(struct __sk_buff, cb[0]) + 3),
874 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
875 offsetof(struct __sk_buff, cb[1])),
876 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
877 offsetof(struct __sk_buff, cb[1]) + 1),
878 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
879 offsetof(struct __sk_buff, cb[1]) + 2),
880 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
881 offsetof(struct __sk_buff, cb[1]) + 3),
882 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
883 offsetof(struct __sk_buff, cb[2])),
884 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
885 offsetof(struct __sk_buff, cb[2]) + 1),
886 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
887 offsetof(struct __sk_buff, cb[2]) + 2),
888 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
889 offsetof(struct __sk_buff, cb[2]) + 3),
890 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
891 offsetof(struct __sk_buff, cb[3])),
892 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
893 offsetof(struct __sk_buff, cb[3]) + 1),
894 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
895 offsetof(struct __sk_buff, cb[3]) + 2),
896 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
897 offsetof(struct __sk_buff, cb[3]) + 3),
898 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
899 offsetof(struct __sk_buff, cb[4])),
900 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
901 offsetof(struct __sk_buff, cb[4]) + 1),
902 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
903 offsetof(struct __sk_buff, cb[4]) + 2),
904 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
905 offsetof(struct __sk_buff, cb[4]) + 3),
906 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
907 offsetof(struct __sk_buff, cb[0])),
908 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, cb[0]) + 1),
910 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
911 offsetof(struct __sk_buff, cb[0]) + 2),
912 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, cb[0]) + 3),
914 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
915 offsetof(struct __sk_buff, cb[1])),
916 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
917 offsetof(struct __sk_buff, cb[1]) + 1),
918 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
919 offsetof(struct __sk_buff, cb[1]) + 2),
920 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
921 offsetof(struct __sk_buff, cb[1]) + 3),
922 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
923 offsetof(struct __sk_buff, cb[2])),
924 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
925 offsetof(struct __sk_buff, cb[2]) + 1),
926 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
927 offsetof(struct __sk_buff, cb[2]) + 2),
928 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
929 offsetof(struct __sk_buff, cb[2]) + 3),
930 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
931 offsetof(struct __sk_buff, cb[3])),
932 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
933 offsetof(struct __sk_buff, cb[3]) + 1),
934 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
935 offsetof(struct __sk_buff, cb[3]) + 2),
936 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
937 offsetof(struct __sk_buff, cb[3]) + 3),
938 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
939 offsetof(struct __sk_buff, cb[4])),
940 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
941 offsetof(struct __sk_buff, cb[4]) + 1),
942 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
943 offsetof(struct __sk_buff, cb[4]) + 2),
944 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
945 offsetof(struct __sk_buff, cb[4]) + 3),
946 BPF_EXIT_INSN(),
947 },
948 .result = ACCEPT,
949 },
950 {
951 "check cb access: byte, oob 1",
952 .insns = {
953 BPF_MOV64_IMM(BPF_REG_0, 0),
954 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
955 offsetof(struct __sk_buff, cb[4]) + 4),
956 BPF_EXIT_INSN(),
957 },
958 .errstr = "invalid bpf_context access",
959 .result = REJECT,
960 },
961 {
962 "check cb access: byte, oob 2",
963 .insns = {
964 BPF_MOV64_IMM(BPF_REG_0, 0),
965 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
966 offsetof(struct __sk_buff, cb[0]) - 1),
967 BPF_EXIT_INSN(),
968 },
969 .errstr = "invalid bpf_context access",
970 .result = REJECT,
971 },
972 {
973 "check cb access: byte, oob 3",
974 .insns = {
975 BPF_MOV64_IMM(BPF_REG_0, 0),
976 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
977 offsetof(struct __sk_buff, cb[4]) + 4),
978 BPF_EXIT_INSN(),
979 },
980 .errstr = "invalid bpf_context access",
981 .result = REJECT,
982 },
983 {
984 "check cb access: byte, oob 4",
985 .insns = {
986 BPF_MOV64_IMM(BPF_REG_0, 0),
987 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
988 offsetof(struct __sk_buff, cb[0]) - 1),
989 BPF_EXIT_INSN(),
990 },
991 .errstr = "invalid bpf_context access",
992 .result = REJECT,
993 },
994 {
995 "check cb access: byte, wrong type",
996 .insns = {
997 BPF_MOV64_IMM(BPF_REG_0, 0),
998 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700999 offsetof(struct __sk_buff, cb[0])),
1000 BPF_EXIT_INSN(),
1001 },
1002 .errstr = "invalid bpf_context access",
1003 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001004 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1005 },
1006 {
1007 "check cb access: half",
1008 .insns = {
1009 BPF_MOV64_IMM(BPF_REG_0, 0),
1010 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1011 offsetof(struct __sk_buff, cb[0])),
1012 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1013 offsetof(struct __sk_buff, cb[0]) + 2),
1014 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1015 offsetof(struct __sk_buff, cb[1])),
1016 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1017 offsetof(struct __sk_buff, cb[1]) + 2),
1018 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1019 offsetof(struct __sk_buff, cb[2])),
1020 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1021 offsetof(struct __sk_buff, cb[2]) + 2),
1022 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1023 offsetof(struct __sk_buff, cb[3])),
1024 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1025 offsetof(struct __sk_buff, cb[3]) + 2),
1026 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1027 offsetof(struct __sk_buff, cb[4])),
1028 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1029 offsetof(struct __sk_buff, cb[4]) + 2),
1030 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1031 offsetof(struct __sk_buff, cb[0])),
1032 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1033 offsetof(struct __sk_buff, cb[0]) + 2),
1034 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1035 offsetof(struct __sk_buff, cb[1])),
1036 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1037 offsetof(struct __sk_buff, cb[1]) + 2),
1038 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1039 offsetof(struct __sk_buff, cb[2])),
1040 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1041 offsetof(struct __sk_buff, cb[2]) + 2),
1042 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1043 offsetof(struct __sk_buff, cb[3])),
1044 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1045 offsetof(struct __sk_buff, cb[3]) + 2),
1046 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1047 offsetof(struct __sk_buff, cb[4])),
1048 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1049 offsetof(struct __sk_buff, cb[4]) + 2),
1050 BPF_EXIT_INSN(),
1051 },
1052 .result = ACCEPT,
1053 },
1054 {
1055 "check cb access: half, unaligned",
1056 .insns = {
1057 BPF_MOV64_IMM(BPF_REG_0, 0),
1058 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1059 offsetof(struct __sk_buff, cb[0]) + 1),
1060 BPF_EXIT_INSN(),
1061 },
1062 .errstr = "misaligned access",
1063 .result = REJECT,
1064 },
1065 {
1066 "check cb access: half, oob 1",
1067 .insns = {
1068 BPF_MOV64_IMM(BPF_REG_0, 0),
1069 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1070 offsetof(struct __sk_buff, cb[4]) + 4),
1071 BPF_EXIT_INSN(),
1072 },
1073 .errstr = "invalid bpf_context access",
1074 .result = REJECT,
1075 },
1076 {
1077 "check cb access: half, oob 2",
1078 .insns = {
1079 BPF_MOV64_IMM(BPF_REG_0, 0),
1080 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1081 offsetof(struct __sk_buff, cb[0]) - 2),
1082 BPF_EXIT_INSN(),
1083 },
1084 .errstr = "invalid bpf_context access",
1085 .result = REJECT,
1086 },
1087 {
1088 "check cb access: half, oob 3",
1089 .insns = {
1090 BPF_MOV64_IMM(BPF_REG_0, 0),
1091 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1092 offsetof(struct __sk_buff, cb[4]) + 4),
1093 BPF_EXIT_INSN(),
1094 },
1095 .errstr = "invalid bpf_context access",
1096 .result = REJECT,
1097 },
1098 {
1099 "check cb access: half, oob 4",
1100 .insns = {
1101 BPF_MOV64_IMM(BPF_REG_0, 0),
1102 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1103 offsetof(struct __sk_buff, cb[0]) - 2),
1104 BPF_EXIT_INSN(),
1105 },
1106 .errstr = "invalid bpf_context access",
1107 .result = REJECT,
1108 },
1109 {
1110 "check cb access: half, wrong type",
1111 .insns = {
1112 BPF_MOV64_IMM(BPF_REG_0, 0),
1113 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1114 offsetof(struct __sk_buff, cb[0])),
1115 BPF_EXIT_INSN(),
1116 },
1117 .errstr = "invalid bpf_context access",
1118 .result = REJECT,
1119 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1120 },
1121 {
1122 "check cb access: word",
1123 .insns = {
1124 BPF_MOV64_IMM(BPF_REG_0, 0),
1125 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1126 offsetof(struct __sk_buff, cb[0])),
1127 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1128 offsetof(struct __sk_buff, cb[1])),
1129 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1130 offsetof(struct __sk_buff, cb[2])),
1131 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1132 offsetof(struct __sk_buff, cb[3])),
1133 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1134 offsetof(struct __sk_buff, cb[4])),
1135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1136 offsetof(struct __sk_buff, cb[0])),
1137 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1138 offsetof(struct __sk_buff, cb[1])),
1139 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1140 offsetof(struct __sk_buff, cb[2])),
1141 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1142 offsetof(struct __sk_buff, cb[3])),
1143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1144 offsetof(struct __sk_buff, cb[4])),
1145 BPF_EXIT_INSN(),
1146 },
1147 .result = ACCEPT,
1148 },
1149 {
1150 "check cb access: word, unaligned 1",
1151 .insns = {
1152 BPF_MOV64_IMM(BPF_REG_0, 0),
1153 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1154 offsetof(struct __sk_buff, cb[0]) + 2),
1155 BPF_EXIT_INSN(),
1156 },
1157 .errstr = "misaligned access",
1158 .result = REJECT,
1159 },
1160 {
1161 "check cb access: word, unaligned 2",
1162 .insns = {
1163 BPF_MOV64_IMM(BPF_REG_0, 0),
1164 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1165 offsetof(struct __sk_buff, cb[4]) + 1),
1166 BPF_EXIT_INSN(),
1167 },
1168 .errstr = "misaligned access",
1169 .result = REJECT,
1170 },
1171 {
1172 "check cb access: word, unaligned 3",
1173 .insns = {
1174 BPF_MOV64_IMM(BPF_REG_0, 0),
1175 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1176 offsetof(struct __sk_buff, cb[4]) + 2),
1177 BPF_EXIT_INSN(),
1178 },
1179 .errstr = "misaligned access",
1180 .result = REJECT,
1181 },
1182 {
1183 "check cb access: word, unaligned 4",
1184 .insns = {
1185 BPF_MOV64_IMM(BPF_REG_0, 0),
1186 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1187 offsetof(struct __sk_buff, cb[4]) + 3),
1188 BPF_EXIT_INSN(),
1189 },
1190 .errstr = "misaligned access",
1191 .result = REJECT,
1192 },
1193 {
1194 "check cb access: double",
1195 .insns = {
1196 BPF_MOV64_IMM(BPF_REG_0, 0),
1197 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1198 offsetof(struct __sk_buff, cb[0])),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1200 offsetof(struct __sk_buff, cb[2])),
1201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1202 offsetof(struct __sk_buff, cb[0])),
1203 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1204 offsetof(struct __sk_buff, cb[2])),
1205 BPF_EXIT_INSN(),
1206 },
1207 .result = ACCEPT,
1208 },
1209 {
1210 "check cb access: double, unaligned 1",
1211 .insns = {
1212 BPF_MOV64_IMM(BPF_REG_0, 0),
1213 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1214 offsetof(struct __sk_buff, cb[1])),
1215 BPF_EXIT_INSN(),
1216 },
1217 .errstr = "misaligned access",
1218 .result = REJECT,
1219 },
1220 {
1221 "check cb access: double, unaligned 2",
1222 .insns = {
1223 BPF_MOV64_IMM(BPF_REG_0, 0),
1224 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1225 offsetof(struct __sk_buff, cb[3])),
1226 BPF_EXIT_INSN(),
1227 },
1228 .errstr = "misaligned access",
1229 .result = REJECT,
1230 },
1231 {
1232 "check cb access: double, oob 1",
1233 .insns = {
1234 BPF_MOV64_IMM(BPF_REG_0, 0),
1235 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1236 offsetof(struct __sk_buff, cb[4])),
1237 BPF_EXIT_INSN(),
1238 },
1239 .errstr = "invalid bpf_context access",
1240 .result = REJECT,
1241 },
1242 {
1243 "check cb access: double, oob 2",
1244 .insns = {
1245 BPF_MOV64_IMM(BPF_REG_0, 0),
1246 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1247 offsetof(struct __sk_buff, cb[4]) + 8),
1248 BPF_EXIT_INSN(),
1249 },
1250 .errstr = "invalid bpf_context access",
1251 .result = REJECT,
1252 },
1253 {
1254 "check cb access: double, oob 3",
1255 .insns = {
1256 BPF_MOV64_IMM(BPF_REG_0, 0),
1257 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1258 offsetof(struct __sk_buff, cb[0]) - 8),
1259 BPF_EXIT_INSN(),
1260 },
1261 .errstr = "invalid bpf_context access",
1262 .result = REJECT,
1263 },
1264 {
1265 "check cb access: double, oob 4",
1266 .insns = {
1267 BPF_MOV64_IMM(BPF_REG_0, 0),
1268 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1269 offsetof(struct __sk_buff, cb[4])),
1270 BPF_EXIT_INSN(),
1271 },
1272 .errstr = "invalid bpf_context access",
1273 .result = REJECT,
1274 },
1275 {
1276 "check cb access: double, oob 5",
1277 .insns = {
1278 BPF_MOV64_IMM(BPF_REG_0, 0),
1279 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1280 offsetof(struct __sk_buff, cb[4]) + 8),
1281 BPF_EXIT_INSN(),
1282 },
1283 .errstr = "invalid bpf_context access",
1284 .result = REJECT,
1285 },
1286 {
1287 "check cb access: double, oob 6",
1288 .insns = {
1289 BPF_MOV64_IMM(BPF_REG_0, 0),
1290 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1291 offsetof(struct __sk_buff, cb[0]) - 8),
1292 BPF_EXIT_INSN(),
1293 },
1294 .errstr = "invalid bpf_context access",
1295 .result = REJECT,
1296 },
1297 {
1298 "check cb access: double, wrong type",
1299 .insns = {
1300 BPF_MOV64_IMM(BPF_REG_0, 0),
1301 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1302 offsetof(struct __sk_buff, cb[0])),
1303 BPF_EXIT_INSN(),
1304 },
1305 .errstr = "invalid bpf_context access",
1306 .result = REJECT,
1307 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001308 },
1309 {
1310 "check out of range skb->cb access",
1311 .insns = {
1312 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001313 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001314 BPF_EXIT_INSN(),
1315 },
1316 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001317 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001318 .result = REJECT,
1319 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1320 },
1321 {
1322 "write skb fields from socket prog",
1323 .insns = {
1324 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1325 offsetof(struct __sk_buff, cb[4])),
1326 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1327 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1328 offsetof(struct __sk_buff, mark)),
1329 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1330 offsetof(struct __sk_buff, tc_index)),
1331 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1332 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1333 offsetof(struct __sk_buff, cb[0])),
1334 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1335 offsetof(struct __sk_buff, cb[2])),
1336 BPF_EXIT_INSN(),
1337 },
1338 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001339 .errstr_unpriv = "R1 leaks addr",
1340 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001341 },
1342 {
1343 "write skb fields from tc_cls_act prog",
1344 .insns = {
1345 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1346 offsetof(struct __sk_buff, cb[0])),
1347 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1348 offsetof(struct __sk_buff, mark)),
1349 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1350 offsetof(struct __sk_buff, tc_index)),
1351 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1352 offsetof(struct __sk_buff, tc_index)),
1353 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1354 offsetof(struct __sk_buff, cb[3])),
1355 BPF_EXIT_INSN(),
1356 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001357 .errstr_unpriv = "",
1358 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001359 .result = ACCEPT,
1360 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1361 },
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001362 {
1363 "PTR_TO_STACK store/load",
1364 .insns = {
1365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1367 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1368 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1369 BPF_EXIT_INSN(),
1370 },
1371 .result = ACCEPT,
1372 },
1373 {
1374 "PTR_TO_STACK store/load - bad alignment on off",
1375 .insns = {
1376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1378 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1379 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1380 BPF_EXIT_INSN(),
1381 },
1382 .result = REJECT,
1383 .errstr = "misaligned access off -6 size 8",
1384 },
1385 {
1386 "PTR_TO_STACK store/load - bad alignment on reg",
1387 .insns = {
1388 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1390 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1391 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1392 BPF_EXIT_INSN(),
1393 },
1394 .result = REJECT,
1395 .errstr = "misaligned access off -2 size 8",
1396 },
1397 {
1398 "PTR_TO_STACK store/load - out of bounds low",
1399 .insns = {
1400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1402 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1403 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1404 BPF_EXIT_INSN(),
1405 },
1406 .result = REJECT,
1407 .errstr = "invalid stack off=-79992 size=8",
1408 },
1409 {
1410 "PTR_TO_STACK store/load - out of bounds high",
1411 .insns = {
1412 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1414 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1415 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1416 BPF_EXIT_INSN(),
1417 },
1418 .result = REJECT,
1419 .errstr = "invalid stack off=0 size=8",
1420 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001421 {
1422 "unpriv: return pointer",
1423 .insns = {
1424 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1425 BPF_EXIT_INSN(),
1426 },
1427 .result = ACCEPT,
1428 .result_unpriv = REJECT,
1429 .errstr_unpriv = "R0 leaks addr",
1430 },
1431 {
1432 "unpriv: add const to pointer",
1433 .insns = {
1434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1435 BPF_MOV64_IMM(BPF_REG_0, 0),
1436 BPF_EXIT_INSN(),
1437 },
1438 .result = ACCEPT,
1439 .result_unpriv = REJECT,
1440 .errstr_unpriv = "R1 pointer arithmetic",
1441 },
1442 {
1443 "unpriv: add pointer to pointer",
1444 .insns = {
1445 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1446 BPF_MOV64_IMM(BPF_REG_0, 0),
1447 BPF_EXIT_INSN(),
1448 },
1449 .result = ACCEPT,
1450 .result_unpriv = REJECT,
1451 .errstr_unpriv = "R1 pointer arithmetic",
1452 },
1453 {
1454 "unpriv: neg pointer",
1455 .insns = {
1456 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1457 BPF_MOV64_IMM(BPF_REG_0, 0),
1458 BPF_EXIT_INSN(),
1459 },
1460 .result = ACCEPT,
1461 .result_unpriv = REJECT,
1462 .errstr_unpriv = "R1 pointer arithmetic",
1463 },
1464 {
1465 "unpriv: cmp pointer with const",
1466 .insns = {
1467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1468 BPF_MOV64_IMM(BPF_REG_0, 0),
1469 BPF_EXIT_INSN(),
1470 },
1471 .result = ACCEPT,
1472 .result_unpriv = REJECT,
1473 .errstr_unpriv = "R1 pointer comparison",
1474 },
1475 {
1476 "unpriv: cmp pointer with pointer",
1477 .insns = {
1478 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1479 BPF_MOV64_IMM(BPF_REG_0, 0),
1480 BPF_EXIT_INSN(),
1481 },
1482 .result = ACCEPT,
1483 .result_unpriv = REJECT,
1484 .errstr_unpriv = "R10 pointer comparison",
1485 },
1486 {
1487 "unpriv: check that printk is disallowed",
1488 .insns = {
1489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1492 BPF_MOV64_IMM(BPF_REG_2, 8),
1493 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1495 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001496 BPF_MOV64_IMM(BPF_REG_0, 0),
1497 BPF_EXIT_INSN(),
1498 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001499 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001500 .result_unpriv = REJECT,
1501 .result = ACCEPT,
1502 },
1503 {
1504 "unpriv: pass pointer to helper function",
1505 .insns = {
1506 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1509 BPF_LD_MAP_FD(BPF_REG_1, 0),
1510 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1511 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1513 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001514 BPF_MOV64_IMM(BPF_REG_0, 0),
1515 BPF_EXIT_INSN(),
1516 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001517 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001518 .errstr_unpriv = "R4 leaks addr",
1519 .result_unpriv = REJECT,
1520 .result = ACCEPT,
1521 },
1522 {
1523 "unpriv: indirectly pass pointer on stack to helper function",
1524 .insns = {
1525 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1526 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1528 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1530 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001531 BPF_MOV64_IMM(BPF_REG_0, 0),
1532 BPF_EXIT_INSN(),
1533 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001534 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001535 .errstr = "invalid indirect read from stack off -8+0 size 8",
1536 .result = REJECT,
1537 },
1538 {
1539 "unpriv: mangle pointer on stack 1",
1540 .insns = {
1541 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1542 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1543 BPF_MOV64_IMM(BPF_REG_0, 0),
1544 BPF_EXIT_INSN(),
1545 },
1546 .errstr_unpriv = "attempt to corrupt spilled",
1547 .result_unpriv = REJECT,
1548 .result = ACCEPT,
1549 },
1550 {
1551 "unpriv: mangle pointer on stack 2",
1552 .insns = {
1553 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1554 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1555 BPF_MOV64_IMM(BPF_REG_0, 0),
1556 BPF_EXIT_INSN(),
1557 },
1558 .errstr_unpriv = "attempt to corrupt spilled",
1559 .result_unpriv = REJECT,
1560 .result = ACCEPT,
1561 },
1562 {
1563 "unpriv: read pointer from stack in small chunks",
1564 .insns = {
1565 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1566 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1567 BPF_MOV64_IMM(BPF_REG_0, 0),
1568 BPF_EXIT_INSN(),
1569 },
1570 .errstr = "invalid size",
1571 .result = REJECT,
1572 },
1573 {
1574 "unpriv: write pointer into ctx",
1575 .insns = {
1576 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1577 BPF_MOV64_IMM(BPF_REG_0, 0),
1578 BPF_EXIT_INSN(),
1579 },
1580 .errstr_unpriv = "R1 leaks addr",
1581 .result_unpriv = REJECT,
1582 .errstr = "invalid bpf_context access",
1583 .result = REJECT,
1584 },
1585 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001586 "unpriv: spill/fill of ctx",
1587 .insns = {
1588 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1590 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1591 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1592 BPF_MOV64_IMM(BPF_REG_0, 0),
1593 BPF_EXIT_INSN(),
1594 },
1595 .result = ACCEPT,
1596 },
1597 {
1598 "unpriv: spill/fill of ctx 2",
1599 .insns = {
1600 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1602 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1603 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001604 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1605 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001606 BPF_EXIT_INSN(),
1607 },
1608 .result = ACCEPT,
1609 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1610 },
1611 {
1612 "unpriv: spill/fill of ctx 3",
1613 .insns = {
1614 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1616 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1617 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1618 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1620 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001621 BPF_EXIT_INSN(),
1622 },
1623 .result = REJECT,
1624 .errstr = "R1 type=fp expected=ctx",
1625 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1626 },
1627 {
1628 "unpriv: spill/fill of ctx 4",
1629 .insns = {
1630 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1632 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1633 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001634 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1635 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001636 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001637 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1638 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001639 BPF_EXIT_INSN(),
1640 },
1641 .result = REJECT,
1642 .errstr = "R1 type=inv expected=ctx",
1643 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1644 },
1645 {
1646 "unpriv: spill/fill of different pointers stx",
1647 .insns = {
1648 BPF_MOV64_IMM(BPF_REG_3, 42),
1649 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1651 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1654 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1655 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1656 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1657 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1658 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1659 offsetof(struct __sk_buff, mark)),
1660 BPF_MOV64_IMM(BPF_REG_0, 0),
1661 BPF_EXIT_INSN(),
1662 },
1663 .result = REJECT,
1664 .errstr = "same insn cannot be used with different pointers",
1665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1666 },
1667 {
1668 "unpriv: spill/fill of different pointers ldx",
1669 .insns = {
1670 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1672 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1675 -(__s32)offsetof(struct bpf_perf_event_data,
1676 sample_period) - 8),
1677 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1678 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1679 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1680 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1681 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1682 offsetof(struct bpf_perf_event_data,
1683 sample_period)),
1684 BPF_MOV64_IMM(BPF_REG_0, 0),
1685 BPF_EXIT_INSN(),
1686 },
1687 .result = REJECT,
1688 .errstr = "same insn cannot be used with different pointers",
1689 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1690 },
1691 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001692 "unpriv: write pointer into map elem value",
1693 .insns = {
1694 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1697 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1699 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001700 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1701 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1702 BPF_EXIT_INSN(),
1703 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001704 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001705 .errstr_unpriv = "R0 leaks addr",
1706 .result_unpriv = REJECT,
1707 .result = ACCEPT,
1708 },
1709 {
1710 "unpriv: partial copy of pointer",
1711 .insns = {
1712 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1713 BPF_MOV64_IMM(BPF_REG_0, 0),
1714 BPF_EXIT_INSN(),
1715 },
1716 .errstr_unpriv = "R10 partial copy",
1717 .result_unpriv = REJECT,
1718 .result = ACCEPT,
1719 },
1720 {
1721 "unpriv: pass pointer to tail_call",
1722 .insns = {
1723 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1724 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1726 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001727 BPF_MOV64_IMM(BPF_REG_0, 0),
1728 BPF_EXIT_INSN(),
1729 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001730 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001731 .errstr_unpriv = "R3 leaks addr into helper",
1732 .result_unpriv = REJECT,
1733 .result = ACCEPT,
1734 },
1735 {
1736 "unpriv: cmp map pointer with zero",
1737 .insns = {
1738 BPF_MOV64_IMM(BPF_REG_1, 0),
1739 BPF_LD_MAP_FD(BPF_REG_1, 0),
1740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1741 BPF_MOV64_IMM(BPF_REG_0, 0),
1742 BPF_EXIT_INSN(),
1743 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001744 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001745 .errstr_unpriv = "R1 pointer comparison",
1746 .result_unpriv = REJECT,
1747 .result = ACCEPT,
1748 },
1749 {
1750 "unpriv: write into frame pointer",
1751 .insns = {
1752 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1753 BPF_MOV64_IMM(BPF_REG_0, 0),
1754 BPF_EXIT_INSN(),
1755 },
1756 .errstr = "frame pointer is read only",
1757 .result = REJECT,
1758 },
1759 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001760 "unpriv: spill/fill frame pointer",
1761 .insns = {
1762 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1763 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1764 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1765 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1766 BPF_MOV64_IMM(BPF_REG_0, 0),
1767 BPF_EXIT_INSN(),
1768 },
1769 .errstr = "frame pointer is read only",
1770 .result = REJECT,
1771 },
1772 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001773 "unpriv: cmp of frame pointer",
1774 .insns = {
1775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1776 BPF_MOV64_IMM(BPF_REG_0, 0),
1777 BPF_EXIT_INSN(),
1778 },
1779 .errstr_unpriv = "R10 pointer comparison",
1780 .result_unpriv = REJECT,
1781 .result = ACCEPT,
1782 },
1783 {
1784 "unpriv: cmp of stack pointer",
1785 .insns = {
1786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1789 BPF_MOV64_IMM(BPF_REG_0, 0),
1790 BPF_EXIT_INSN(),
1791 },
1792 .errstr_unpriv = "R2 pointer comparison",
1793 .result_unpriv = REJECT,
1794 .result = ACCEPT,
1795 },
1796 {
1797 "unpriv: obfuscate stack pointer",
1798 .insns = {
1799 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1802 BPF_MOV64_IMM(BPF_REG_0, 0),
1803 BPF_EXIT_INSN(),
1804 },
1805 .errstr_unpriv = "R2 pointer arithmetic",
1806 .result_unpriv = REJECT,
1807 .result = ACCEPT,
1808 },
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001809 {
1810 "raw_stack: no skb_load_bytes",
1811 .insns = {
1812 BPF_MOV64_IMM(BPF_REG_2, 4),
1813 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1815 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1816 BPF_MOV64_IMM(BPF_REG_4, 8),
1817 /* Call to skb_load_bytes() omitted. */
1818 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1819 BPF_EXIT_INSN(),
1820 },
1821 .result = REJECT,
1822 .errstr = "invalid read from stack off -8+0 size 8",
1823 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1824 },
	{
		/* R3 points at a valid 8-byte stack buffer, but the length
		 * argument R4 is negative (-8): the verifier must reject the
		 * call before any stack access size can be derived.
		 */
		"raw_stack: skb_load_bytes, negative len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same as above with R4 = ~0 (all-ones, i.e. -1): another
		 * negative length encoding that must be rejected.
		 */
		"raw_stack: skb_load_bytes, negative len 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, ~0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero-length (R4 = 0) stack buffer access is likewise
		 * rejected for this helper.
		 */
		"raw_stack: skb_load_bytes, zero len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The 8-byte stack buffer at fp-8 is passed to the helper
		 * without being written first, and the program is ACCEPTed:
		 * the buffer is the helper's write destination, so prior
		 * initialization is not required. The following BPF_LDX from
		 * the slot is also fine after the call.
		 */
		"raw_stack: skb_load_bytes, no init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same layout, but the slot is pre-initialized with an
		 * immediate store (0xcafe) before the call; also ACCEPTed.
		 */
		"raw_stack: skb_load_bytes, init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* R1 (ctx pointer) is spilled at fp-24 and fp-8, just outside
		 * the 8-byte helper buffer at [fp-16, fp-8). Since the helper
		 * write does not touch the spill slots, both spilled pointers
		 * can be reloaded and dereferenced afterwards: ACCEPT.
		 */
		"raw_stack: skb_load_bytes, spilled regs around bounds",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The ctx pointer is spilled INSIDE the helper buffer; after
		 * the helper call the slot no longer holds a valid pointer,
		 * so reloading it and dereferencing skb->mark must fail with
		 * an 'inv' (scalar) access error.
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R0 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Three ctx-pointer spills at fp-24/fp-16/fp-8; only the
		 * middle one overlaps the helper buffer. Reloading the two
		 * outer spills is fine, but dereferencing the clobbered
		 * middle one (R3) must be rejected.
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same spill layout as above, but the clobbered slot (R3) is
		 * only used as plain data (added into R0), never dereferenced
		 * as a pointer — so this variant is ACCEPTed.
		 */
		"raw_stack: skb_load_bytes, spilled regs + data",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Buffer pointer one byte below the lowest valid stack offset
		 * (fp-513, below the 512-byte stack limit): rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 8-byte write starting at fp-1 would extend past the frame
		 * pointer into the caller's area: rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 0xffffffff for both offset and length: the error message
		 * shows them interpreted as -1/-1 — still rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Huge positive length (INT_MAX) with off=-1: rejected. */
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Valid offset (fp-512) but INT_MAX length overflows the
		 * buffer: rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Valid offset but zero length: rejected. */
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Maximum in-bounds access: 512 bytes starting at fp-512,
		 * exactly filling the stack: ACCEPT.
		 */
		"raw_stack: skb_load_bytes, large access",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 512),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Canonical bounds-checked packet read: data + 8 is compared
		 * against data_end before the one-byte load — ACCEPT.
		 */
		"direct packet access: test1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Variable-offset packet access: a byte read from the packet
		 * feeds a multiplied offset, the ctx register is masked via
		 * LSH/RSH by 48, and the resulting pointer is re-checked
		 * against data_end before the final load — ACCEPT.
		 */
		"direct packet access: test2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Socket filters may not read skb->data directly; the load
		 * of that ctx field (offset 76, per the error string) is
		 * rejected for BPF_PROG_TYPE_SOCKET_FILTER.
		 */
		"direct packet access: test3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access off=76",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	},
	{
		/* Bounds-checked one-byte WRITE into the packet is allowed
		 * for SCHED_CLS programs — ACCEPT.
		 */
		"direct packet access: test4 (write)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Inverted comparison form: pkt_end >= ptr taken-branch leads
		 * to the load, fall-through exits — ACCEPT.
		 */
		"direct packet access: test5 (pkt_end >= reg, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same comparison, but the load sits on the FALL-THROUGH
		 * path (the unchecked side) — rejected.
		 */
		"direct packet access: test6 (pkt_end >= reg, bad access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Loads on BOTH sides of the branch: the unchecked side
		 * still exists, so the program is rejected.
		 */
		"direct packet access: test7 (pkt_end >= reg, both accesses)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Double check (JGE then JGT): both loads end up on paths
		 * where the range is proven — ACCEPT.
		 */
		"direct packet access: test8 (double test, variant 1)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Double check, variant 2: JGE exits early on failure, the
		 * second JGT guards the remaining loads — ACCEPT.
		 */
		"direct packet access: test9 (double test, variant 2)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The packet WRITE sits on the branch taken when the bounds
		 * check FAILS (ptr + 8 > data_end) — rejected.
		 */
		"direct packet access: test10 (write invalid)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* A known-constant (144 + 23) right-shifted by 3 yields a
		 * bounded offset added to the packet pointer within the
		 * 22-byte checked range — ACCEPT.
		 */
		"direct packet access: test11 (shift, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 144),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Like test11 but bounding the offset with AND 15 instead of
		 * a shift — ACCEPT.
		 */
		"direct packet access: test12 (and, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 144),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The offset comes from one of two branches (14 or 24,
		 * depending on skb->mark) and is then masked with AND 15 —
		 * still bounded, so ACCEPT.
		 */
		"direct packet access: test13 (branches, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_MOV64_IMM(BPF_REG_4, 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_3, 14),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_MOV64_IMM(BPF_REG_3, 24),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 12 >> 4 == 0: adding a known-zero constant to the packet
		 * pointer keeps it within the checked range, and the byte
		 * load through it is accepted.
		 */
		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
			BPF_MOV64_IMM(BPF_REG_5, 12),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* XDP: a bounds-checked packet pointer (8 bytes proven) may
		 * be passed as the value argument to map_update_elem.
		 * fixup_map1 patches a real map fd into insn 5 (the
		 * BPF_LD_MAP_FD, which occupies two insn slots).
		 */
		"helper access to packet: test1, valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result_unpriv = ACCEPT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: packet pointer handed to map_lookup_elem as the key
		 * WITHOUT any data_end check — rejected.
		 */
		"helper access to packet: test2, unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: packet pointer advanced by a value read from the
		 * packet itself; a second bounds check re-validates the new
		 * pointer before the helper call — ACCEPT.
		 */
		"helper access to packet: test3, variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: only 4 bytes proven valid, but the map key needs
		 * more — rejected.
		 */
		"helper access to packet: test4, packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: pointer advanced by 1 first, so only 7 bytes are
		 * proven — one byte short for the helper — rejected.
		 */
		"helper access to packet: test5, packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* SCHED_CLS counterpart of XDP test1: checked packet pointer
		 * passed as map_update_elem value — ACCEPT. fixup_map1
		 * patches a real map fd into insn 5 (the BPF_LD_MAP_FD).
		 */
		"helper access to packet: test6, cls valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test2: unchecked packet
		 * pointer as map key — rejected.
		 */
		"helper access to packet: test7, cls unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test3: variable advance with
		 * a second bounds re-check before the call — ACCEPT.
		 */
		"helper access to packet: test8, cls variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test4: only 4 bytes proven —
		 * rejected.
		 */
		"helper access to packet: test9, cls packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test5: 7 bytes proven, one
		 * short — rejected.
		 */
		"helper access to packet: test10, cls packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Bounds-checked packet pointer passed as the 'from' buffer
		 * (R3) to skb_store_bytes: this helper does not accept
		 * packet pointers as memory arguments — rejected.
		 */
		"helper access to packet: test11, cls unsuitable helper 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 42),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_store_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Bounds-checked packet pointer passed as the destination
		 * buffer (R3) to skb_load_bytes: likewise rejected.
		 */
		"helper access to packet: test12, cls unsuitable helper 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
2695 {
2696 "helper access to packet: test13, cls helper ok",
2697 .insns = {
2698 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2699 offsetof(struct __sk_buff, data)),
2700 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2701 offsetof(struct __sk_buff, data_end)),
2702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2705 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2707 BPF_MOV64_IMM(BPF_REG_2, 4),
2708 BPF_MOV64_IMM(BPF_REG_3, 0),
2709 BPF_MOV64_IMM(BPF_REG_4, 0),
2710 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2712 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002713 BPF_MOV64_IMM(BPF_REG_0, 0),
2714 BPF_EXIT_INSN(),
2715 },
2716 .result = ACCEPT,
2717 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2718 },
2719 {
2720 "helper access to packet: test14, cls helper fail sub",
2721 .insns = {
2722 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2723 offsetof(struct __sk_buff, data)),
2724 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2725 offsetof(struct __sk_buff, data_end)),
2726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2729 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2730 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2731 BPF_MOV64_IMM(BPF_REG_2, 4),
2732 BPF_MOV64_IMM(BPF_REG_3, 0),
2733 BPF_MOV64_IMM(BPF_REG_4, 0),
2734 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2736 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002737 BPF_MOV64_IMM(BPF_REG_0, 0),
2738 BPF_EXIT_INSN(),
2739 },
2740 .result = REJECT,
2741 .errstr = "type=inv expected=fp",
2742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2743 },
2744 {
2745 "helper access to packet: test15, cls helper fail range 1",
2746 .insns = {
2747 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2748 offsetof(struct __sk_buff, data)),
2749 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2750 offsetof(struct __sk_buff, data_end)),
2751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2754 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2756 BPF_MOV64_IMM(BPF_REG_2, 8),
2757 BPF_MOV64_IMM(BPF_REG_3, 0),
2758 BPF_MOV64_IMM(BPF_REG_4, 0),
2759 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2761 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002762 BPF_MOV64_IMM(BPF_REG_0, 0),
2763 BPF_EXIT_INSN(),
2764 },
2765 .result = REJECT,
2766 .errstr = "invalid access to packet",
2767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2768 },
2769 {
2770 "helper access to packet: test16, cls helper fail range 2",
2771 .insns = {
2772 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2773 offsetof(struct __sk_buff, data)),
2774 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2775 offsetof(struct __sk_buff, data_end)),
2776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2777 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2779 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2780 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2781 BPF_MOV64_IMM(BPF_REG_2, -9),
2782 BPF_MOV64_IMM(BPF_REG_3, 0),
2783 BPF_MOV64_IMM(BPF_REG_4, 0),
2784 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2786 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002787 BPF_MOV64_IMM(BPF_REG_0, 0),
2788 BPF_EXIT_INSN(),
2789 },
2790 .result = REJECT,
2791 .errstr = "invalid access to packet",
2792 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2793 },
2794 {
2795 "helper access to packet: test17, cls helper fail range 3",
2796 .insns = {
2797 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2798 offsetof(struct __sk_buff, data)),
2799 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2800 offsetof(struct __sk_buff, data_end)),
2801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2802 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2804 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2806 BPF_MOV64_IMM(BPF_REG_2, ~0),
2807 BPF_MOV64_IMM(BPF_REG_3, 0),
2808 BPF_MOV64_IMM(BPF_REG_4, 0),
2809 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2811 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002812 BPF_MOV64_IMM(BPF_REG_0, 0),
2813 BPF_EXIT_INSN(),
2814 },
2815 .result = REJECT,
2816 .errstr = "invalid access to packet",
2817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2818 },
2819 {
2820 "helper access to packet: test18, cls helper fail range zero",
2821 .insns = {
2822 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2823 offsetof(struct __sk_buff, data)),
2824 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2825 offsetof(struct __sk_buff, data_end)),
2826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2829 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2831 BPF_MOV64_IMM(BPF_REG_2, 0),
2832 BPF_MOV64_IMM(BPF_REG_3, 0),
2833 BPF_MOV64_IMM(BPF_REG_4, 0),
2834 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2836 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002837 BPF_MOV64_IMM(BPF_REG_0, 0),
2838 BPF_EXIT_INSN(),
2839 },
2840 .result = REJECT,
2841 .errstr = "invalid access to packet",
2842 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2843 },
2844 {
2845 "helper access to packet: test19, pkt end as input",
2846 .insns = {
2847 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2848 offsetof(struct __sk_buff, data)),
2849 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2850 offsetof(struct __sk_buff, data_end)),
2851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2852 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2854 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
2856 BPF_MOV64_IMM(BPF_REG_2, 4),
2857 BPF_MOV64_IMM(BPF_REG_3, 0),
2858 BPF_MOV64_IMM(BPF_REG_4, 0),
2859 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2861 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002862 BPF_MOV64_IMM(BPF_REG_0, 0),
2863 BPF_EXIT_INSN(),
2864 },
2865 .result = REJECT,
2866 .errstr = "R1 type=pkt_end expected=fp",
2867 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2868 },
2869 {
2870 "helper access to packet: test20, wrong reg",
2871 .insns = {
2872 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2873 offsetof(struct __sk_buff, data)),
2874 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2875 offsetof(struct __sk_buff, data_end)),
2876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2877 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2879 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2880 BPF_MOV64_IMM(BPF_REG_2, 4),
2881 BPF_MOV64_IMM(BPF_REG_3, 0),
2882 BPF_MOV64_IMM(BPF_REG_4, 0),
2883 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2885 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002886 BPF_MOV64_IMM(BPF_REG_0, 0),
2887 BPF_EXIT_INSN(),
2888 },
2889 .result = REJECT,
2890 .errstr = "invalid access to packet",
2891 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2892 },
Josef Bacik48461132016-09-28 10:54:32 -04002893 {
2894 "valid map access into an array with a constant",
2895 .insns = {
2896 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2899 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2901 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002903 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2904 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002905 BPF_EXIT_INSN(),
2906 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002907 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04002908 .errstr_unpriv = "R0 leaks addr",
2909 .result_unpriv = REJECT,
2910 .result = ACCEPT,
2911 },
2912 {
2913 "valid map access into an array with a register",
2914 .insns = {
2915 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2918 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2920 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2922 BPF_MOV64_IMM(BPF_REG_1, 4),
2923 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2924 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002925 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2926 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002927 BPF_EXIT_INSN(),
2928 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002929 .fixup_map2 = { 3 },
2930 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002931 .result_unpriv = REJECT,
2932 .result = ACCEPT,
2933 },
2934 {
2935 "valid map access into an array with a variable",
2936 .insns = {
2937 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2940 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2942 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
2944 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2945 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
2946 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2947 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002948 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2949 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002950 BPF_EXIT_INSN(),
2951 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002952 .fixup_map2 = { 3 },
2953 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002954 .result_unpriv = REJECT,
2955 .result = ACCEPT,
2956 },
2957 {
2958 "valid map access into an array with a signed variable",
2959 .insns = {
2960 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2963 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2965 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002966 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
2967 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2968 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
2969 BPF_MOV32_IMM(BPF_REG_1, 0),
2970 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
2971 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
2972 BPF_MOV32_IMM(BPF_REG_1, 0),
2973 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
2974 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002975 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2976 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002977 BPF_EXIT_INSN(),
2978 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002979 .fixup_map2 = { 3 },
2980 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002981 .result_unpriv = REJECT,
2982 .result = ACCEPT,
2983 },
2984 {
2985 "invalid map access into an array with a constant",
2986 .insns = {
2987 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2988 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2990 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002991 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2992 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002993 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2994 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
2995 offsetof(struct test_val, foo)),
2996 BPF_EXIT_INSN(),
2997 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002998 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04002999 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3000 .result = REJECT,
3001 },
3002 {
3003 "invalid map access into an array with a register",
3004 .insns = {
3005 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3006 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3008 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3010 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003011 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3012 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3013 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3014 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003015 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3016 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003017 BPF_EXIT_INSN(),
3018 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003019 .fixup_map2 = { 3 },
3020 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003021 .errstr = "R0 min value is outside of the array range",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003022 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003023 .result = REJECT,
3024 },
3025 {
3026 "invalid map access into an array with a variable",
3027 .insns = {
3028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3031 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3033 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3035 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3036 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3037 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003038 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3039 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003040 BPF_EXIT_INSN(),
3041 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003042 .fixup_map2 = { 3 },
3043 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003044 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003045 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003046 .result = REJECT,
3047 },
3048 {
3049 "invalid map access into an array with no floor check",
3050 .insns = {
3051 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3052 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3054 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003055 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3056 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003057 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3058 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3059 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3060 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3061 BPF_MOV32_IMM(BPF_REG_1, 0),
3062 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3063 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003064 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3065 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003066 BPF_EXIT_INSN(),
3067 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003068 .fixup_map2 = { 3 },
3069 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003070 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003071 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003072 .result = REJECT,
3073 },
3074 {
3075 "invalid map access into an array with a invalid max check",
3076 .insns = {
3077 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3078 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3080 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3082 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003083 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3084 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3085 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3086 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3087 BPF_MOV32_IMM(BPF_REG_1, 0),
3088 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3089 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003090 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3091 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003092 BPF_EXIT_INSN(),
3093 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003094 .fixup_map2 = { 3 },
3095 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003096 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003097 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003098 .result = REJECT,
3099 },
3100 {
3101 "invalid map access into an array with a invalid max check",
3102 .insns = {
3103 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3104 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3106 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3108 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3110 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3111 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3114 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3116 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3118 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003119 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3120 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003121 BPF_EXIT_INSN(),
3122 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003123 .fixup_map2 = { 3, 11 },
3124 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003125 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003126 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003127 .result = REJECT,
3128 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02003129 {
3130 "multiple registers share map_lookup_elem result",
3131 .insns = {
3132 BPF_MOV64_IMM(BPF_REG_1, 10),
3133 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3134 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3135 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3136 BPF_LD_MAP_FD(BPF_REG_1, 0),
3137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3138 BPF_FUNC_map_lookup_elem),
3139 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3141 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3142 BPF_EXIT_INSN(),
3143 },
3144 .fixup_map1 = { 4 },
3145 .result = ACCEPT,
3146 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3147 },
3148 {
3149 "invalid memory access with multiple map_lookup_elem calls",
3150 .insns = {
3151 BPF_MOV64_IMM(BPF_REG_1, 10),
3152 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3155 BPF_LD_MAP_FD(BPF_REG_1, 0),
3156 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3157 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3159 BPF_FUNC_map_lookup_elem),
3160 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3164 BPF_FUNC_map_lookup_elem),
3165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3166 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3167 BPF_EXIT_INSN(),
3168 },
3169 .fixup_map1 = { 4 },
3170 .result = REJECT,
3171 .errstr = "R4 !read_ok",
3172 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3173 },
3174 {
3175 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3176 .insns = {
3177 BPF_MOV64_IMM(BPF_REG_1, 10),
3178 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3181 BPF_LD_MAP_FD(BPF_REG_1, 0),
3182 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3183 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3185 BPF_FUNC_map_lookup_elem),
3186 BPF_MOV64_IMM(BPF_REG_2, 10),
3187 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3188 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3189 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3191 BPF_FUNC_map_lookup_elem),
3192 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3194 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3195 BPF_EXIT_INSN(),
3196 },
3197 .fixup_map1 = { 4 },
3198 .result = ACCEPT,
3199 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3200 },
Josef Bacike9548902016-11-29 12:35:19 -05003201 {
Daniel Borkmanna08dd0d2016-12-15 01:30:06 +01003202 "multiple registers share map_lookup_elem bad reg type",
3203 .insns = {
3204 BPF_MOV64_IMM(BPF_REG_1, 10),
3205 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3206 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3208 BPF_LD_MAP_FD(BPF_REG_1, 0),
3209 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3210 BPF_FUNC_map_lookup_elem),
3211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3212 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3213 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3214 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3216 BPF_MOV64_IMM(BPF_REG_1, 1),
3217 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3218 BPF_MOV64_IMM(BPF_REG_1, 2),
3219 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3220 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3221 BPF_MOV64_IMM(BPF_REG_1, 3),
3222 BPF_EXIT_INSN(),
3223 },
3224 .fixup_map1 = { 4 },
3225 .result = REJECT,
3226 .errstr = "R3 invalid mem access 'inv'",
3227 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3228 },
3229 {
Josef Bacike9548902016-11-29 12:35:19 -05003230 "invalid map access from else condition",
3231 .insns = {
3232 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3235 BPF_LD_MAP_FD(BPF_REG_1, 0),
3236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3238 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3239 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3241 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3242 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3243 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3244 BPF_EXIT_INSN(),
3245 },
3246 .fixup_map2 = { 3 },
3247 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3248 .result = REJECT,
3249 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3250 .result_unpriv = REJECT,
3251 },
Gianluca Borello3c8397442016-12-03 12:31:33 -08003252 {
3253 "constant register |= constant should keep constant type",
3254 .insns = {
3255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3257 BPF_MOV64_IMM(BPF_REG_2, 34),
3258 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3259 BPF_MOV64_IMM(BPF_REG_3, 0),
3260 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3261 BPF_EXIT_INSN(),
3262 },
3263 .result = ACCEPT,
3264 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3265 },
3266 {
3267 "constant register |= constant should not bypass stack boundary checks",
3268 .insns = {
3269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3271 BPF_MOV64_IMM(BPF_REG_2, 34),
3272 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3273 BPF_MOV64_IMM(BPF_REG_3, 0),
3274 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3275 BPF_EXIT_INSN(),
3276 },
3277 .errstr = "invalid stack type R1 off=-48 access_size=58",
3278 .result = REJECT,
3279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3280 },
3281 {
3282 "constant register |= constant register should keep constant type",
3283 .insns = {
3284 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3286 BPF_MOV64_IMM(BPF_REG_2, 34),
3287 BPF_MOV64_IMM(BPF_REG_4, 13),
3288 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3289 BPF_MOV64_IMM(BPF_REG_3, 0),
3290 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3291 BPF_EXIT_INSN(),
3292 },
3293 .result = ACCEPT,
3294 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3295 },
3296 {
3297 "constant register |= constant register should not bypass stack boundary checks",
3298 .insns = {
3299 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3301 BPF_MOV64_IMM(BPF_REG_2, 34),
3302 BPF_MOV64_IMM(BPF_REG_4, 24),
3303 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3304 BPF_MOV64_IMM(BPF_REG_3, 0),
3305 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3306 BPF_EXIT_INSN(),
3307 },
3308 .errstr = "invalid stack type R1 off=-48 access_size=58",
3309 .result = REJECT,
3310 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3311 },
Thomas Graf3f731d82016-12-05 10:30:52 +01003312 {
3313 "invalid direct packet write for LWT_IN",
3314 .insns = {
3315 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3316 offsetof(struct __sk_buff, data)),
3317 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3318 offsetof(struct __sk_buff, data_end)),
3319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3321 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3322 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3323 BPF_MOV64_IMM(BPF_REG_0, 0),
3324 BPF_EXIT_INSN(),
3325 },
3326 .errstr = "cannot write into packet",
3327 .result = REJECT,
3328 .prog_type = BPF_PROG_TYPE_LWT_IN,
3329 },
3330 {
3331 "invalid direct packet write for LWT_OUT",
3332 .insns = {
3333 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3334 offsetof(struct __sk_buff, data)),
3335 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3336 offsetof(struct __sk_buff, data_end)),
3337 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3339 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3340 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3341 BPF_MOV64_IMM(BPF_REG_0, 0),
3342 BPF_EXIT_INSN(),
3343 },
3344 .errstr = "cannot write into packet",
3345 .result = REJECT,
3346 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3347 },
3348 {
3349 "direct packet write for LWT_XMIT",
3350 .insns = {
3351 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3352 offsetof(struct __sk_buff, data)),
3353 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3354 offsetof(struct __sk_buff, data_end)),
3355 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3357 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3358 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3359 BPF_MOV64_IMM(BPF_REG_0, 0),
3360 BPF_EXIT_INSN(),
3361 },
3362 .result = ACCEPT,
3363 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3364 },
3365 {
3366 "direct packet read for LWT_IN",
3367 .insns = {
3368 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3369 offsetof(struct __sk_buff, data)),
3370 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3371 offsetof(struct __sk_buff, data_end)),
3372 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3374 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3375 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3376 BPF_MOV64_IMM(BPF_REG_0, 0),
3377 BPF_EXIT_INSN(),
3378 },
3379 .result = ACCEPT,
3380 .prog_type = BPF_PROG_TYPE_LWT_IN,
3381 },
3382 {
3383 "direct packet read for LWT_OUT",
3384 .insns = {
3385 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3386 offsetof(struct __sk_buff, data)),
3387 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3388 offsetof(struct __sk_buff, data_end)),
3389 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3391 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3392 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3393 BPF_MOV64_IMM(BPF_REG_0, 0),
3394 BPF_EXIT_INSN(),
3395 },
3396 .result = ACCEPT,
3397 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3398 },
3399 {
3400 "direct packet read for LWT_XMIT",
3401 .insns = {
3402 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3403 offsetof(struct __sk_buff, data)),
3404 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3405 offsetof(struct __sk_buff, data_end)),
3406 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3408 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3409 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3410 BPF_MOV64_IMM(BPF_REG_0, 0),
3411 BPF_EXIT_INSN(),
3412 },
3413 .result = ACCEPT,
3414 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3415 },
3416 {
3417 "invalid access of tc_classid for LWT_IN",
3418 .insns = {
3419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3420 offsetof(struct __sk_buff, tc_classid)),
3421 BPF_EXIT_INSN(),
3422 },
3423 .result = REJECT,
3424 .errstr = "invalid bpf_context access",
3425 },
3426 {
3427 "invalid access of tc_classid for LWT_OUT",
3428 .insns = {
3429 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3430 offsetof(struct __sk_buff, tc_classid)),
3431 BPF_EXIT_INSN(),
3432 },
3433 .result = REJECT,
3434 .errstr = "invalid bpf_context access",
3435 },
3436 {
3437 "invalid access of tc_classid for LWT_XMIT",
3438 .insns = {
3439 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3440 offsetof(struct __sk_buff, tc_classid)),
3441 BPF_EXIT_INSN(),
3442 },
3443 .result = REJECT,
3444 .errstr = "invalid bpf_context access",
3445 },
Gianluca Borello57225692017-01-09 10:19:47 -08003446 {
3447 "helper access to map: full range",
3448 .insns = {
3449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3451 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3452 BPF_LD_MAP_FD(BPF_REG_1, 0),
3453 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3456 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3457 BPF_MOV64_IMM(BPF_REG_3, 0),
3458 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3459 BPF_EXIT_INSN(),
3460 },
3461 .fixup_map2 = { 3 },
3462 .result = ACCEPT,
3463 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3464 },
3465 {
3466 "helper access to map: partial range",
3467 .insns = {
3468 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3470 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3471 BPF_LD_MAP_FD(BPF_REG_1, 0),
3472 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3473 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3475 BPF_MOV64_IMM(BPF_REG_2, 8),
3476 BPF_MOV64_IMM(BPF_REG_3, 0),
3477 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3478 BPF_EXIT_INSN(),
3479 },
3480 .fixup_map2 = { 3 },
3481 .result = ACCEPT,
3482 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3483 },
3484 {
3485 "helper access to map: empty range",
3486 .insns = {
3487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3489 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3490 BPF_LD_MAP_FD(BPF_REG_1, 0),
3491 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3492 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3494 BPF_MOV64_IMM(BPF_REG_2, 0),
3495 BPF_MOV64_IMM(BPF_REG_3, 0),
3496 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3497 BPF_EXIT_INSN(),
3498 },
3499 .fixup_map2 = { 3 },
3500 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3501 .result = REJECT,
3502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3503 },
3504 {
3505 "helper access to map: out-of-bound range",
3506 .insns = {
3507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3509 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3510 BPF_LD_MAP_FD(BPF_REG_1, 0),
3511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3514 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3515 BPF_MOV64_IMM(BPF_REG_3, 0),
3516 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3517 BPF_EXIT_INSN(),
3518 },
3519 .fixup_map2 = { 3 },
3520 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3521 .result = REJECT,
3522 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3523 },
3524 {
3525 "helper access to map: negative range",
3526 .insns = {
3527 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3529 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3530 BPF_LD_MAP_FD(BPF_REG_1, 0),
3531 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3534 BPF_MOV64_IMM(BPF_REG_2, -8),
3535 BPF_MOV64_IMM(BPF_REG_3, 0),
3536 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3537 BPF_EXIT_INSN(),
3538 },
3539 .fixup_map2 = { 3 },
3540 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3541 .result = REJECT,
3542 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3543 },
3544 {
3545 "helper access to adjusted map (via const imm): full range",
3546 .insns = {
3547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3549 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3550 BPF_LD_MAP_FD(BPF_REG_1, 0),
3551 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3552 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3555 offsetof(struct test_val, foo)),
3556 BPF_MOV64_IMM(BPF_REG_2,
3557 sizeof(struct test_val) -
3558 offsetof(struct test_val, foo)),
3559 BPF_MOV64_IMM(BPF_REG_3, 0),
3560 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3561 BPF_EXIT_INSN(),
3562 },
3563 .fixup_map2 = { 3 },
3564 .result = ACCEPT,
3565 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3566 },
3567 {
3568 "helper access to adjusted map (via const imm): partial range",
3569 .insns = {
3570 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3572 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3573 BPF_LD_MAP_FD(BPF_REG_1, 0),
3574 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3576 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3578 offsetof(struct test_val, foo)),
3579 BPF_MOV64_IMM(BPF_REG_2, 8),
3580 BPF_MOV64_IMM(BPF_REG_3, 0),
3581 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3582 BPF_EXIT_INSN(),
3583 },
3584 .fixup_map2 = { 3 },
3585 .result = ACCEPT,
3586 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3587 },
3588 {
3589 "helper access to adjusted map (via const imm): empty range",
3590 .insns = {
3591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3593 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3594 BPF_LD_MAP_FD(BPF_REG_1, 0),
3595 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3597 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3599 offsetof(struct test_val, foo)),
3600 BPF_MOV64_IMM(BPF_REG_2, 0),
3601 BPF_MOV64_IMM(BPF_REG_3, 0),
3602 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3603 BPF_EXIT_INSN(),
3604 },
3605 .fixup_map2 = { 3 },
3606 .errstr = "R1 min value is outside of the array range",
3607 .result = REJECT,
3608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3609 },
3610 {
3611 "helper access to adjusted map (via const imm): out-of-bound range",
3612 .insns = {
3613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3615 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3616 BPF_LD_MAP_FD(BPF_REG_1, 0),
3617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3621 offsetof(struct test_val, foo)),
3622 BPF_MOV64_IMM(BPF_REG_2,
3623 sizeof(struct test_val) -
3624 offsetof(struct test_val, foo) + 8),
3625 BPF_MOV64_IMM(BPF_REG_3, 0),
3626 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3627 BPF_EXIT_INSN(),
3628 },
3629 .fixup_map2 = { 3 },
3630 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3631 .result = REJECT,
3632 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3633 },
3634 {
3635 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3636 .insns = {
3637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3639 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3640 BPF_LD_MAP_FD(BPF_REG_1, 0),
3641 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3642 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3645 offsetof(struct test_val, foo)),
3646 BPF_MOV64_IMM(BPF_REG_2, -8),
3647 BPF_MOV64_IMM(BPF_REG_3, 0),
3648 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3649 BPF_EXIT_INSN(),
3650 },
3651 .fixup_map2 = { 3 },
3652 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3653 .result = REJECT,
3654 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3655 },
3656 {
3657 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3658 .insns = {
3659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3661 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3662 BPF_LD_MAP_FD(BPF_REG_1, 0),
3663 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3665 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3667 offsetof(struct test_val, foo)),
3668 BPF_MOV64_IMM(BPF_REG_2, -1),
3669 BPF_MOV64_IMM(BPF_REG_3, 0),
3670 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3671 BPF_EXIT_INSN(),
3672 },
3673 .fixup_map2 = { 3 },
3674 .errstr = "R1 min value is outside of the array range",
3675 .result = REJECT,
3676 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3677 },
3678 {
3679 "helper access to adjusted map (via const reg): full range",
3680 .insns = {
3681 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3683 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3684 BPF_LD_MAP_FD(BPF_REG_1, 0),
3685 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3688 BPF_MOV64_IMM(BPF_REG_3,
3689 offsetof(struct test_val, foo)),
3690 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3691 BPF_MOV64_IMM(BPF_REG_2,
3692 sizeof(struct test_val) -
3693 offsetof(struct test_val, foo)),
3694 BPF_MOV64_IMM(BPF_REG_3, 0),
3695 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3696 BPF_EXIT_INSN(),
3697 },
3698 .fixup_map2 = { 3 },
3699 .result = ACCEPT,
3700 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3701 },
3702 {
3703 "helper access to adjusted map (via const reg): partial range",
3704 .insns = {
3705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3707 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3708 BPF_LD_MAP_FD(BPF_REG_1, 0),
3709 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3712 BPF_MOV64_IMM(BPF_REG_3,
3713 offsetof(struct test_val, foo)),
3714 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3715 BPF_MOV64_IMM(BPF_REG_2, 8),
3716 BPF_MOV64_IMM(BPF_REG_3, 0),
3717 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3718 BPF_EXIT_INSN(),
3719 },
3720 .fixup_map2 = { 3 },
3721 .result = ACCEPT,
3722 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3723 },
3724 {
3725 "helper access to adjusted map (via const reg): empty range",
3726 .insns = {
3727 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3729 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3730 BPF_LD_MAP_FD(BPF_REG_1, 0),
3731 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3733 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3734 BPF_MOV64_IMM(BPF_REG_3, 0),
3735 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3736 BPF_MOV64_IMM(BPF_REG_2, 0),
3737 BPF_MOV64_IMM(BPF_REG_3, 0),
3738 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3739 BPF_EXIT_INSN(),
3740 },
3741 .fixup_map2 = { 3 },
3742 .errstr = "R1 min value is outside of the array range",
3743 .result = REJECT,
3744 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3745 },
3746 {
3747 "helper access to adjusted map (via const reg): out-of-bound range",
3748 .insns = {
3749 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3751 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3752 BPF_LD_MAP_FD(BPF_REG_1, 0),
3753 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3756 BPF_MOV64_IMM(BPF_REG_3,
3757 offsetof(struct test_val, foo)),
3758 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3759 BPF_MOV64_IMM(BPF_REG_2,
3760 sizeof(struct test_val) -
3761 offsetof(struct test_val, foo) + 8),
3762 BPF_MOV64_IMM(BPF_REG_3, 0),
3763 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3764 BPF_EXIT_INSN(),
3765 },
3766 .fixup_map2 = { 3 },
3767 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3768 .result = REJECT,
3769 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3770 },
3771 {
3772 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3773 .insns = {
3774 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3776 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3777 BPF_LD_MAP_FD(BPF_REG_1, 0),
3778 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3780 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3781 BPF_MOV64_IMM(BPF_REG_3,
3782 offsetof(struct test_val, foo)),
3783 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3784 BPF_MOV64_IMM(BPF_REG_2, -8),
3785 BPF_MOV64_IMM(BPF_REG_3, 0),
3786 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3787 BPF_EXIT_INSN(),
3788 },
3789 .fixup_map2 = { 3 },
3790 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3791 .result = REJECT,
3792 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3793 },
3794 {
3795 "helper access to adjusted map (via const reg): negative range (< adjustment)",
3796 .insns = {
3797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3799 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3800 BPF_LD_MAP_FD(BPF_REG_1, 0),
3801 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3804 BPF_MOV64_IMM(BPF_REG_3,
3805 offsetof(struct test_val, foo)),
3806 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3807 BPF_MOV64_IMM(BPF_REG_2, -1),
3808 BPF_MOV64_IMM(BPF_REG_3, 0),
3809 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3810 BPF_EXIT_INSN(),
3811 },
3812 .fixup_map2 = { 3 },
3813 .errstr = "R1 min value is outside of the array range",
3814 .result = REJECT,
3815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3816 },
3817 {
3818 "helper access to adjusted map (via variable): full range",
3819 .insns = {
3820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3822 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3823 BPF_LD_MAP_FD(BPF_REG_1, 0),
3824 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3825 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3828 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3829 offsetof(struct test_val, foo), 4),
3830 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3831 BPF_MOV64_IMM(BPF_REG_2,
3832 sizeof(struct test_val) -
3833 offsetof(struct test_val, foo)),
3834 BPF_MOV64_IMM(BPF_REG_3, 0),
3835 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3836 BPF_EXIT_INSN(),
3837 },
3838 .fixup_map2 = { 3 },
3839 .result = ACCEPT,
3840 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3841 },
3842 {
3843 "helper access to adjusted map (via variable): partial range",
3844 .insns = {
3845 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3847 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3848 BPF_LD_MAP_FD(BPF_REG_1, 0),
3849 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3850 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3852 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3853 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3854 offsetof(struct test_val, foo), 4),
3855 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3856 BPF_MOV64_IMM(BPF_REG_2, 8),
3857 BPF_MOV64_IMM(BPF_REG_3, 0),
3858 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3859 BPF_EXIT_INSN(),
3860 },
3861 .fixup_map2 = { 3 },
3862 .result = ACCEPT,
3863 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3864 },
3865 {
3866 "helper access to adjusted map (via variable): empty range",
3867 .insns = {
3868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3870 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3871 BPF_LD_MAP_FD(BPF_REG_1, 0),
3872 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3876 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3877 offsetof(struct test_val, foo), 4),
3878 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3879 BPF_MOV64_IMM(BPF_REG_2, 0),
3880 BPF_MOV64_IMM(BPF_REG_3, 0),
3881 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3882 BPF_EXIT_INSN(),
3883 },
3884 .fixup_map2 = { 3 },
3885 .errstr = "R1 min value is outside of the array range",
3886 .result = REJECT,
3887 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3888 },
3889 {
3890 "helper access to adjusted map (via variable): no max check",
3891 .insns = {
3892 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3894 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3895 BPF_LD_MAP_FD(BPF_REG_1, 0),
3896 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3897 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3899 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3900 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3901 BPF_MOV64_IMM(BPF_REG_2, 0),
3902 BPF_MOV64_IMM(BPF_REG_3, 0),
3903 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3904 BPF_EXIT_INSN(),
3905 },
3906 .fixup_map2 = { 3 },
3907 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
3908 .result = REJECT,
3909 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3910 },
3911 {
3912 "helper access to adjusted map (via variable): wrong max check",
3913 .insns = {
3914 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3916 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3917 BPF_LD_MAP_FD(BPF_REG_1, 0),
3918 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3920 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3922 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3923 offsetof(struct test_val, foo), 4),
3924 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3925 BPF_MOV64_IMM(BPF_REG_2,
3926 sizeof(struct test_val) -
3927 offsetof(struct test_val, foo) + 1),
3928 BPF_MOV64_IMM(BPF_REG_3, 0),
3929 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3930 BPF_EXIT_INSN(),
3931 },
3932 .fixup_map2 = { 3 },
3933 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
3934 .result = REJECT,
3935 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3936 },
Gianluca Borellof0318d02017-01-09 10:19:48 -08003937 {
3938 "map element value is preserved across register spilling",
3939 .insns = {
3940 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3942 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3943 BPF_LD_MAP_FD(BPF_REG_1, 0),
3944 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3946 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3949 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3950 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3951 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3952 BPF_EXIT_INSN(),
3953 },
3954 .fixup_map2 = { 3 },
3955 .errstr_unpriv = "R0 leaks addr",
3956 .result = ACCEPT,
3957 .result_unpriv = REJECT,
3958 },
3959 {
3960 "map element value (adjusted) is preserved across register spilling",
3961 .insns = {
3962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3964 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3965 BPF_LD_MAP_FD(BPF_REG_1, 0),
3966 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3967 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
3969 offsetof(struct test_val, foo)),
3970 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3973 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3974 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3975 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3976 BPF_EXIT_INSN(),
3977 },
3978 .fixup_map2 = { 3 },
3979 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3980 .result = ACCEPT,
3981 .result_unpriv = REJECT,
3982 },
Gianluca Borello06c1c042017-01-09 10:19:49 -08003983 {
3984 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
3985 .insns = {
3986 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
3988 BPF_MOV64_IMM(BPF_REG_0, 0),
3989 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
3990 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
3991 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
3992 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
3993 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
3994 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
3995 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
3996 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3997 BPF_MOV64_IMM(BPF_REG_2, 16),
3998 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
3999 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4000 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4001 BPF_MOV64_IMM(BPF_REG_4, 0),
4002 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4003 BPF_MOV64_IMM(BPF_REG_3, 0),
4004 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4005 BPF_MOV64_IMM(BPF_REG_0, 0),
4006 BPF_EXIT_INSN(),
4007 },
4008 .result = ACCEPT,
4009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4010 },
4011 {
4012 "helper access to variable memory: stack, bitwise AND, zero included",
4013 .insns = {
4014 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4016 BPF_MOV64_IMM(BPF_REG_2, 16),
4017 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4018 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4019 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4020 BPF_MOV64_IMM(BPF_REG_3, 0),
4021 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4022 BPF_EXIT_INSN(),
4023 },
4024 .errstr = "invalid stack type R1 off=-64 access_size=0",
4025 .result = REJECT,
4026 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4027 },
4028 {
4029 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4030 .insns = {
4031 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4033 BPF_MOV64_IMM(BPF_REG_2, 16),
4034 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4035 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4036 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4037 BPF_MOV64_IMM(BPF_REG_4, 0),
4038 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4039 BPF_MOV64_IMM(BPF_REG_3, 0),
4040 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4041 BPF_MOV64_IMM(BPF_REG_0, 0),
4042 BPF_EXIT_INSN(),
4043 },
4044 .errstr = "invalid stack type R1 off=-64 access_size=65",
4045 .result = REJECT,
4046 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4047 },
4048 {
4049 "helper access to variable memory: stack, JMP, correct bounds",
4050 .insns = {
4051 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4053 BPF_MOV64_IMM(BPF_REG_0, 0),
4054 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4055 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4056 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4058 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4062 BPF_MOV64_IMM(BPF_REG_2, 16),
4063 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4064 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4065 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4066 BPF_MOV64_IMM(BPF_REG_4, 0),
4067 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4068 BPF_MOV64_IMM(BPF_REG_3, 0),
4069 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4070 BPF_MOV64_IMM(BPF_REG_0, 0),
4071 BPF_EXIT_INSN(),
4072 },
4073 .result = ACCEPT,
4074 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4075 },
4076 {
4077 "helper access to variable memory: stack, JMP (signed), correct bounds",
4078 .insns = {
4079 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4081 BPF_MOV64_IMM(BPF_REG_0, 0),
4082 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4083 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4084 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4085 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4086 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4087 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4088 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4089 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4090 BPF_MOV64_IMM(BPF_REG_2, 16),
4091 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4092 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4093 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4094 BPF_MOV64_IMM(BPF_REG_4, 0),
4095 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4096 BPF_MOV64_IMM(BPF_REG_3, 0),
4097 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4098 BPF_MOV64_IMM(BPF_REG_0, 0),
4099 BPF_EXIT_INSN(),
4100 },
4101 .result = ACCEPT,
4102 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4103 },
4104 {
4105 "helper access to variable memory: stack, JMP, bounds + offset",
4106 .insns = {
4107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4109 BPF_MOV64_IMM(BPF_REG_2, 16),
4110 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4111 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4112 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4113 BPF_MOV64_IMM(BPF_REG_4, 0),
4114 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4116 BPF_MOV64_IMM(BPF_REG_3, 0),
4117 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4118 BPF_MOV64_IMM(BPF_REG_0, 0),
4119 BPF_EXIT_INSN(),
4120 },
4121 .errstr = "invalid stack type R1 off=-64 access_size=65",
4122 .result = REJECT,
4123 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4124 },
4125 {
4126 "helper access to variable memory: stack, JMP, wrong max",
4127 .insns = {
4128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4130 BPF_MOV64_IMM(BPF_REG_2, 16),
4131 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4132 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4133 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4134 BPF_MOV64_IMM(BPF_REG_4, 0),
4135 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4136 BPF_MOV64_IMM(BPF_REG_3, 0),
4137 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4138 BPF_MOV64_IMM(BPF_REG_0, 0),
4139 BPF_EXIT_INSN(),
4140 },
4141 .errstr = "invalid stack type R1 off=-64 access_size=65",
4142 .result = REJECT,
4143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4144 },
4145 {
4146 "helper access to variable memory: stack, JMP, no max check",
4147 .insns = {
4148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4150 BPF_MOV64_IMM(BPF_REG_2, 16),
4151 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4152 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4153 BPF_MOV64_IMM(BPF_REG_4, 0),
4154 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4155 BPF_MOV64_IMM(BPF_REG_3, 0),
4156 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4157 BPF_MOV64_IMM(BPF_REG_0, 0),
4158 BPF_EXIT_INSN(),
4159 },
4160 .errstr = "R2 unbounded memory access",
4161 .result = REJECT,
4162 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4163 },
4164 {
4165 "helper access to variable memory: stack, JMP, no min check",
4166 .insns = {
4167 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4169 BPF_MOV64_IMM(BPF_REG_2, 16),
4170 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4171 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4172 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4173 BPF_MOV64_IMM(BPF_REG_3, 0),
4174 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4175 BPF_MOV64_IMM(BPF_REG_0, 0),
4176 BPF_EXIT_INSN(),
4177 },
4178 .errstr = "invalid stack type R1 off=-64 access_size=0",
4179 .result = REJECT,
4180 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4181 },
4182 {
4183 "helper access to variable memory: stack, JMP (signed), no min check",
4184 .insns = {
4185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4187 BPF_MOV64_IMM(BPF_REG_2, 16),
4188 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4189 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4190 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4191 BPF_MOV64_IMM(BPF_REG_3, 0),
4192 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4193 BPF_MOV64_IMM(BPF_REG_0, 0),
4194 BPF_EXIT_INSN(),
4195 },
4196 .errstr = "R2 min value is negative",
4197 .result = REJECT,
4198 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4199 },
4200 {
4201 "helper access to variable memory: map, JMP, correct bounds",
4202 .insns = {
4203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4205 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4206 BPF_LD_MAP_FD(BPF_REG_1, 0),
4207 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4210 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4211 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4212 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4213 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4214 sizeof(struct test_val), 4),
4215 BPF_MOV64_IMM(BPF_REG_4, 0),
4216 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4217 BPF_MOV64_IMM(BPF_REG_3, 0),
4218 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4219 BPF_MOV64_IMM(BPF_REG_0, 0),
4220 BPF_EXIT_INSN(),
4221 },
4222 .fixup_map2 = { 3 },
4223 .result = ACCEPT,
4224 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4225 },
4226 {
4227 "helper access to variable memory: map, JMP, wrong max",
4228 .insns = {
4229 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4231 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4232 BPF_LD_MAP_FD(BPF_REG_1, 0),
4233 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4234 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4236 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4237 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4238 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4239 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4240 sizeof(struct test_val) + 1, 4),
4241 BPF_MOV64_IMM(BPF_REG_4, 0),
4242 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4243 BPF_MOV64_IMM(BPF_REG_3, 0),
4244 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4245 BPF_MOV64_IMM(BPF_REG_0, 0),
4246 BPF_EXIT_INSN(),
4247 },
4248 .fixup_map2 = { 3 },
4249 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4250 .result = REJECT,
4251 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4252 },
4253 {
4254 "helper access to variable memory: map adjusted, JMP, correct bounds",
4255 .insns = {
4256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4258 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4259 BPF_LD_MAP_FD(BPF_REG_1, 0),
4260 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4264 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4265 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4266 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4267 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4268 sizeof(struct test_val) - 20, 4),
4269 BPF_MOV64_IMM(BPF_REG_4, 0),
4270 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4271 BPF_MOV64_IMM(BPF_REG_3, 0),
4272 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4273 BPF_MOV64_IMM(BPF_REG_0, 0),
4274 BPF_EXIT_INSN(),
4275 },
4276 .fixup_map2 = { 3 },
4277 .result = ACCEPT,
4278 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4279 },
4280 {
4281 "helper access to variable memory: map adjusted, JMP, wrong max",
4282 .insns = {
4283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4285 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4286 BPF_LD_MAP_FD(BPF_REG_1, 0),
4287 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4289 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4291 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4292 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4293 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4294 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4295 sizeof(struct test_val) - 19, 4),
4296 BPF_MOV64_IMM(BPF_REG_4, 0),
4297 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4298 BPF_MOV64_IMM(BPF_REG_3, 0),
4299 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4300 BPF_MOV64_IMM(BPF_REG_0, 0),
4301 BPF_EXIT_INSN(),
4302 },
4303 .fixup_map2 = { 3 },
4304 .errstr = "R1 min value is outside of the array range",
4305 .result = REJECT,
4306 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4307 },
4308 {
4309 "helper access to variable memory: size > 0 not allowed on NULL",
4310 .insns = {
4311 BPF_MOV64_IMM(BPF_REG_1, 0),
4312 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004313 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4314 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004315 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4316 BPF_MOV64_IMM(BPF_REG_3, 0),
4317 BPF_MOV64_IMM(BPF_REG_4, 0),
4318 BPF_MOV64_IMM(BPF_REG_5, 0),
4319 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4320 BPF_EXIT_INSN(),
4321 },
4322 .errstr = "R1 type=imm expected=fp",
4323 .result = REJECT,
4324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4325 },
4326 {
4327 "helper access to variable memory: size = 0 not allowed on != NULL",
4328 .insns = {
4329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4331 BPF_MOV64_IMM(BPF_REG_2, 0),
4332 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4333 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4334 BPF_MOV64_IMM(BPF_REG_3, 0),
4335 BPF_MOV64_IMM(BPF_REG_4, 0),
4336 BPF_MOV64_IMM(BPF_REG_5, 0),
4337 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4338 BPF_EXIT_INSN(),
4339 },
4340 .errstr = "invalid stack type R1 off=-8 access_size=0",
4341 .result = REJECT,
4342 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4343 },
4344 {
4345 "helper access to variable memory: 8 bytes leak",
4346 .insns = {
4347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4349 BPF_MOV64_IMM(BPF_REG_0, 0),
4350 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4351 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4352 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4354 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4355 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4356 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4357 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4359 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004360 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4362 BPF_MOV64_IMM(BPF_REG_3, 0),
4363 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4364 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4365 BPF_EXIT_INSN(),
4366 },
4367 .errstr = "invalid indirect read from stack off -64+32 size 64",
4368 .result = REJECT,
4369 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4370 },
4371 {
4372 "helper access to variable memory: 8 bytes no leak (init memory)",
4373 .insns = {
4374 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4375 BPF_MOV64_IMM(BPF_REG_0, 0),
4376 BPF_MOV64_IMM(BPF_REG_0, 0),
4377 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4378 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4379 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4380 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4381 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4382 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4383 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4384 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4386 BPF_MOV64_IMM(BPF_REG_2, 0),
4387 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4389 BPF_MOV64_IMM(BPF_REG_3, 0),
4390 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4391 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4392 BPF_EXIT_INSN(),
4393 },
4394 .result = ACCEPT,
4395 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4396 },
Josef Bacik29200c12017-02-03 16:25:23 -05004397 {
4398 "invalid and of negative number",
4399 .insns = {
4400 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1, 0),
4404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4405 BPF_FUNC_map_lookup_elem),
4406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4407 BPF_MOV64_IMM(BPF_REG_1, 6),
4408 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4409 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4410 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4411 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4412 offsetof(struct test_val, foo)),
4413 BPF_EXIT_INSN(),
4414 },
4415 .fixup_map2 = { 3 },
4416 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4417 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4418 .result = REJECT,
4419 .result_unpriv = REJECT,
4420 },
4421 {
4422 "invalid range check",
4423 .insns = {
4424 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4427 BPF_LD_MAP_FD(BPF_REG_1, 0),
4428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4429 BPF_FUNC_map_lookup_elem),
4430 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4431 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4432 BPF_MOV64_IMM(BPF_REG_9, 1),
4433 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4434 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4435 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4436 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4437 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4438 BPF_MOV32_IMM(BPF_REG_3, 1),
4439 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4440 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4441 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4442 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4443 BPF_MOV64_REG(BPF_REG_0, 0),
4444 BPF_EXIT_INSN(),
4445 },
4446 .fixup_map2 = { 3 },
4447 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4448 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4449 .result = REJECT,
4450 .result_unpriv = REJECT,
4451 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004452};
4453
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004454static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004455{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004456 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004457
4458 for (len = MAX_INSNS - 1; len > 0; --len)
4459 if (fp[len].code != 0 || fp[len].imm != 0)
4460 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004461 return len + 1;
4462}
4463
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004464static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004465{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004466 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004467
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004468 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
4469 size_value, max_elem, BPF_F_NO_PREALLOC);
4470 if (fd < 0)
4471 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004472
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004473 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004474}
4475
4476static int create_prog_array(void)
4477{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004478 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004479
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004480 fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
4481 sizeof(int), 4, 0);
4482 if (fd < 0)
4483 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004484
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004485 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004486}
4487
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004488static char bpf_vlog[32768];
4489
4490static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4491 int *fd_f1, int *fd_f2, int *fd_f3)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004492{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004493 int *fixup_map1 = test->fixup_map1;
4494 int *fixup_map2 = test->fixup_map2;
4495 int *fixup_prog = test->fixup_prog;
4496
4497 /* Allocating HTs with 1 elem is fine here, since we only test
4498 * for verifier and not do a runtime lookup, so the only thing
4499 * that really matters is value size in this case.
4500 */
4501 if (*fixup_map1) {
4502 *fd_f1 = create_map(sizeof(long long), 1);
4503 do {
4504 prog[*fixup_map1].imm = *fd_f1;
4505 fixup_map1++;
4506 } while (*fixup_map1);
4507 }
4508
4509 if (*fixup_map2) {
4510 *fd_f2 = create_map(sizeof(struct test_val), 1);
4511 do {
4512 prog[*fixup_map2].imm = *fd_f2;
4513 fixup_map2++;
4514 } while (*fixup_map2);
4515 }
4516
4517 if (*fixup_prog) {
4518 *fd_f3 = create_prog_array();
4519 do {
4520 prog[*fixup_prog].imm = *fd_f3;
4521 fixup_prog++;
4522 } while (*fixup_prog);
4523 }
4524}
4525
4526static void do_test_single(struct bpf_test *test, bool unpriv,
4527 int *passes, int *errors)
4528{
4529 struct bpf_insn *prog = test->insns;
4530 int prog_len = probe_filter_length(prog);
4531 int prog_type = test->prog_type;
4532 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4533 int fd_prog, expected_ret;
4534 const char *expected_err;
4535
4536 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
4537
4538 fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
4539 prog, prog_len * sizeof(struct bpf_insn),
4540 "GPL", bpf_vlog, sizeof(bpf_vlog));
4541
4542 expected_ret = unpriv && test->result_unpriv != UNDEF ?
4543 test->result_unpriv : test->result;
4544 expected_err = unpriv && test->errstr_unpriv ?
4545 test->errstr_unpriv : test->errstr;
4546 if (expected_ret == ACCEPT) {
4547 if (fd_prog < 0) {
4548 printf("FAIL\nFailed to load prog '%s'!\n",
4549 strerror(errno));
4550 goto fail_log;
4551 }
4552 } else {
4553 if (fd_prog >= 0) {
4554 printf("FAIL\nUnexpected success to load!\n");
4555 goto fail_log;
4556 }
4557 if (!strstr(bpf_vlog, expected_err)) {
4558 printf("FAIL\nUnexpected error message!\n");
4559 goto fail_log;
4560 }
4561 }
4562
4563 (*passes)++;
4564 printf("OK\n");
4565close_fds:
4566 close(fd_prog);
4567 close(fd_f1);
4568 close(fd_f2);
4569 close(fd_f3);
4570 sched_yield();
4571 return;
4572fail_log:
4573 (*errors)++;
4574 printf("%s", bpf_vlog);
4575 goto close_fds;
4576}
4577
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004578static bool is_admin(void)
4579{
4580 cap_t caps;
4581 cap_flag_value_t sysadmin = CAP_CLEAR;
4582 const cap_value_t cap_val = CAP_SYS_ADMIN;
4583
4584 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4585 perror("cap_get_flag");
4586 return false;
4587 }
4588 caps = cap_get_proc();
4589 if (!caps) {
4590 perror("cap_get_proc");
4591 return false;
4592 }
4593 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
4594 perror("cap_get_flag");
4595 if (cap_free(caps))
4596 perror("cap_free");
4597 return (sysadmin == CAP_SET);
4598}
4599
4600static int set_admin(bool admin)
4601{
4602 cap_t caps;
4603 const cap_value_t cap_val = CAP_SYS_ADMIN;
4604 int ret = -1;
4605
4606 caps = cap_get_proc();
4607 if (!caps) {
4608 perror("cap_get_proc");
4609 return -1;
4610 }
4611 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
4612 admin ? CAP_SET : CAP_CLEAR)) {
4613 perror("cap_set_flag");
4614 goto out;
4615 }
4616 if (cap_set_proc(caps)) {
4617 perror("cap_set_proc");
4618 goto out;
4619 }
4620 ret = 0;
4621out:
4622 if (cap_free(caps))
4623 perror("cap_free");
4624 return ret;
4625}
4626
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004627static int do_test(bool unpriv, unsigned int from, unsigned int to)
4628{
4629 int i, passes = 0, errors = 0;
4630
4631 for (i = from; i < to; i++) {
4632 struct bpf_test *test = &tests[i];
4633
4634 /* Program types that are not supported by non-root we
4635 * skip right away.
4636 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004637 if (!test->prog_type) {
4638 if (!unpriv)
4639 set_admin(false);
4640 printf("#%d/u %s ", i, test->descr);
4641 do_test_single(test, true, &passes, &errors);
4642 if (!unpriv)
4643 set_admin(true);
4644 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004645
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004646 if (!unpriv) {
4647 printf("#%d/p %s ", i, test->descr);
4648 do_test_single(test, false, &passes, &errors);
4649 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004650 }
4651
4652 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
4653 return errors ? -errors : 0;
4654}
4655
4656int main(int argc, char **argv)
4657{
4658 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
4659 struct rlimit rlim = { 1 << 20, 1 << 20 };
4660 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004661 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004662
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004663 if (argc == 3) {
4664 unsigned int l = atoi(argv[argc - 2]);
4665 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004666
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004667 if (l < to && u < to) {
4668 from = l;
4669 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004670 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004671 } else if (argc == 2) {
4672 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004673
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004674 if (t < to) {
4675 from = t;
4676 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004677 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004678 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004679
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004680 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
4681 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004682}