/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include "../../../include/linux/filter.h"

#include "bpf_sys.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8

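/* One verifier test case.  A rough field guide, based on how the entries
 * below use these members:
 *
 *   descr           test name printed by the runner
 *   insns           the eBPF program under test, built from the insn macros
 *   fixup_map1/2    instruction indices whose dummy map fd (loaded via
 *                   BPF_LD_MAP_FD(..., 0)) the runner is expected to patch
 *                   with a real test map fd before loading
 *   fixup_prog      same idea, for a program array used by tail calls
 *   errstr          substring expected in the verifier log for REJECT
 *   errstr_unpriv   expected log substring when loading unprivileged
 *   result          expected verdict when loaded with CAP_SYS_ADMIN
 *   result_unpriv   expected verdict without it (UNDEF presumably means
 *                   "same as result")
 *   prog_type       program type to load as (0 = whatever the runner
 *                   defaults to)
 */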
struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
};

/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

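/* With MAX_ENTRIES == 11, struct test_val is 48 bytes (a multiple of 8),
 * which is what the alignment note above is after.  It apparently describes
 * the value layout of the larger test map patched in via fixup_map2.
 */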
static struct bpf_test tests[] = {
64 {
65 "add+sub+mul",
66 .insns = {
67 BPF_MOV64_IMM(BPF_REG_1, 1),
68 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
69 BPF_MOV64_IMM(BPF_REG_2, 3),
70 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
71 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
72 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
73 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
74 BPF_EXIT_INSN(),
75 },
76 .result = ACCEPT,
77 },
78 {
79 "unreachable",
80 .insns = {
81 BPF_EXIT_INSN(),
82 BPF_EXIT_INSN(),
83 },
84 .errstr = "unreachable",
85 .result = REJECT,
86 },
87 {
88 "unreachable2",
89 .insns = {
90 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
91 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
92 BPF_EXIT_INSN(),
93 },
94 .errstr = "unreachable",
95 .result = REJECT,
96 },
97 {
98 "out of range jump",
99 .insns = {
100 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
101 BPF_EXIT_INSN(),
102 },
103 .errstr = "jump out of range",
104 .result = REJECT,
105 },
106 {
107 "out of range jump2",
108 .insns = {
109 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
110 BPF_EXIT_INSN(),
111 },
112 .errstr = "jump out of range",
113 .result = REJECT,
114 },
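	/* BPF_LD_IMM64 expands to two consecutive struct bpf_insn (the first
	 * is BPF_LD | BPF_DW | BPF_IMM, the second half carries the upper 32
	 * bits).  The ld_imm64 tests below either jump into the middle of
	 * such a pair or feed the verifier a lone/broken half, expecting
	 * "invalid BPF_LD_IMM insn" or "invalid bpf_ld_imm64 insn".
	 */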
115 {
116 "test1 ld_imm64",
117 .insns = {
118 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
119 BPF_LD_IMM64(BPF_REG_0, 0),
120 BPF_LD_IMM64(BPF_REG_0, 0),
121 BPF_LD_IMM64(BPF_REG_0, 1),
122 BPF_LD_IMM64(BPF_REG_0, 1),
123 BPF_MOV64_IMM(BPF_REG_0, 2),
124 BPF_EXIT_INSN(),
125 },
126 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700127 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700128 .result = REJECT,
129 },
130 {
131 "test2 ld_imm64",
132 .insns = {
133 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
134 BPF_LD_IMM64(BPF_REG_0, 0),
135 BPF_LD_IMM64(BPF_REG_0, 0),
136 BPF_LD_IMM64(BPF_REG_0, 1),
137 BPF_LD_IMM64(BPF_REG_0, 1),
138 BPF_EXIT_INSN(),
139 },
140 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700141 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700142 .result = REJECT,
143 },
144 {
145 "test3 ld_imm64",
146 .insns = {
147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
148 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
149 BPF_LD_IMM64(BPF_REG_0, 0),
150 BPF_LD_IMM64(BPF_REG_0, 0),
151 BPF_LD_IMM64(BPF_REG_0, 1),
152 BPF_LD_IMM64(BPF_REG_0, 1),
153 BPF_EXIT_INSN(),
154 },
155 .errstr = "invalid bpf_ld_imm64 insn",
156 .result = REJECT,
157 },
158 {
159 "test4 ld_imm64",
160 .insns = {
161 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
162 BPF_EXIT_INSN(),
163 },
164 .errstr = "invalid bpf_ld_imm64 insn",
165 .result = REJECT,
166 },
167 {
168 "test5 ld_imm64",
169 .insns = {
170 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
171 },
172 .errstr = "invalid bpf_ld_imm64 insn",
173 .result = REJECT,
174 },
175 {
176 "no bpf_exit",
177 .insns = {
178 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
179 },
180 .errstr = "jump out of range",
181 .result = REJECT,
182 },
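	/* The verifier's control flow check rejects any back-edge, i.e. it
	 * does not allow loops, whether they are reached by an unconditional
	 * or a conditional jump.
	 */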
183 {
184 "loop (back-edge)",
185 .insns = {
186 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
187 BPF_EXIT_INSN(),
188 },
189 .errstr = "back-edge",
190 .result = REJECT,
191 },
192 {
193 "loop2 (back-edge)",
194 .insns = {
195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
196 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
197 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
198 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
199 BPF_EXIT_INSN(),
200 },
201 .errstr = "back-edge",
202 .result = REJECT,
203 },
204 {
205 "conditional loop",
206 .insns = {
207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
209 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
211 BPF_EXIT_INSN(),
212 },
213 .errstr = "back-edge",
214 .result = REJECT,
215 },
216 {
217 "read uninitialized register",
218 .insns = {
219 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
220 BPF_EXIT_INSN(),
221 },
222 .errstr = "R2 !read_ok",
223 .result = REJECT,
224 },
225 {
226 "read invalid register",
227 .insns = {
228 BPF_MOV64_REG(BPF_REG_0, -1),
229 BPF_EXIT_INSN(),
230 },
231 .errstr = "R15 is invalid",
232 .result = REJECT,
233 },
234 {
235 "program doesn't init R0 before exit",
236 .insns = {
237 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
238 BPF_EXIT_INSN(),
239 },
240 .errstr = "R0 !read_ok",
241 .result = REJECT,
242 },
243 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700244 "program doesn't init R0 before exit in all branches",
245 .insns = {
246 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
247 BPF_MOV64_IMM(BPF_REG_0, 1),
248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
249 BPF_EXIT_INSN(),
250 },
251 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700252 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700253 .result = REJECT,
254 },
255 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700256 "stack out of bounds",
257 .insns = {
258 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
259 BPF_EXIT_INSN(),
260 },
261 .errstr = "invalid stack",
262 .result = REJECT,
263 },
264 {
265 "invalid call insn1",
266 .insns = {
267 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
268 BPF_EXIT_INSN(),
269 },
270 .errstr = "BPF_CALL uses reserved",
271 .result = REJECT,
272 },
273 {
274 "invalid call insn2",
275 .insns = {
276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
277 BPF_EXIT_INSN(),
278 },
279 .errstr = "BPF_CALL uses reserved",
280 .result = REJECT,
281 },
282 {
283 "invalid function call",
284 .insns = {
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
286 BPF_EXIT_INSN(),
287 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100288 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700289 .result = REJECT,
290 },
291 {
292 "uninitialized stack1",
293 .insns = {
294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
296 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
298 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700299 BPF_EXIT_INSN(),
300 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200301 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700302 .errstr = "invalid indirect read from stack",
303 .result = REJECT,
304 },
305 {
306 "uninitialized stack2",
307 .insns = {
308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
309 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
310 BPF_EXIT_INSN(),
311 },
312 .errstr = "invalid read from stack",
313 .result = REJECT,
314 },
315 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200316 "invalid argument register",
317 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
319 BPF_FUNC_get_cgroup_classid),
320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
321 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200322 BPF_EXIT_INSN(),
323 },
324 .errstr = "R1 !read_ok",
325 .result = REJECT,
326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 },
328 {
329 "non-invalid argument register",
330 .insns = {
331 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
333 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200334 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
336 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200337 BPF_EXIT_INSN(),
338 },
339 .result = ACCEPT,
340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
341 },
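	/* Spill/fill: storing a register to the stack and loading it back.
	 * The verifier tracks the type of spilled values, so a pointer that
	 * is filled back keeps being a pointer; unprivileged programs are
	 * still not allowed to leak such a pointer through R0.
	 */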
342 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700343 "check valid spill/fill",
344 .insns = {
345 /* spill R1(ctx) into stack */
346 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700347 /* fill it back into R2 */
348 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700349 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100350 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
351 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700352 BPF_EXIT_INSN(),
353 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700354 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700355 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700356 .result_unpriv = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700357 },
358 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200359 "check valid spill/fill, skb mark",
360 .insns = {
361 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
363 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
364 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
365 offsetof(struct __sk_buff, mark)),
366 BPF_EXIT_INSN(),
367 },
368 .result = ACCEPT,
369 .result_unpriv = ACCEPT,
370 },
371 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700372 "check corrupted spill/fill",
373 .insns = {
374 /* spill R1(ctx) into stack */
375 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700376 /* mess up with R1 pointer on stack */
377 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700378 /* fill back into R0 should fail */
379 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700380 BPF_EXIT_INSN(),
381 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700382 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700383 .errstr = "corrupted spill",
384 .result = REJECT,
385 },
386 {
387 "invalid src register in STX",
388 .insns = {
389 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
390 BPF_EXIT_INSN(),
391 },
392 .errstr = "R15 is invalid",
393 .result = REJECT,
394 },
395 {
396 "invalid dst register in STX",
397 .insns = {
398 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
399 BPF_EXIT_INSN(),
400 },
401 .errstr = "R14 is invalid",
402 .result = REJECT,
403 },
404 {
405 "invalid dst register in ST",
406 .insns = {
407 BPF_ST_MEM(BPF_B, 14, -1, -1),
408 BPF_EXIT_INSN(),
409 },
410 .errstr = "R14 is invalid",
411 .result = REJECT,
412 },
413 {
414 "invalid src register in LDX",
415 .insns = {
416 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
417 BPF_EXIT_INSN(),
418 },
419 .errstr = "R12 is invalid",
420 .result = REJECT,
421 },
422 {
423 "invalid dst register in LDX",
424 .insns = {
425 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
426 BPF_EXIT_INSN(),
427 },
428 .errstr = "R11 is invalid",
429 .result = REJECT,
430 },
431 {
432 "junk insn",
433 .insns = {
434 BPF_RAW_INSN(0, 0, 0, 0, 0),
435 BPF_EXIT_INSN(),
436 },
437 .errstr = "invalid BPF_LD_IMM",
438 .result = REJECT,
439 },
440 {
441 "junk insn2",
442 .insns = {
443 BPF_RAW_INSN(1, 0, 0, 0, 0),
444 BPF_EXIT_INSN(),
445 },
446 .errstr = "BPF_LDX uses reserved fields",
447 .result = REJECT,
448 },
449 {
450 "junk insn3",
451 .insns = {
452 BPF_RAW_INSN(-1, 0, 0, 0, 0),
453 BPF_EXIT_INSN(),
454 },
455 .errstr = "invalid BPF_ALU opcode f0",
456 .result = REJECT,
457 },
458 {
459 "junk insn4",
460 .insns = {
461 BPF_RAW_INSN(-1, -1, -1, -1, -1),
462 BPF_EXIT_INSN(),
463 },
464 .errstr = "invalid BPF_ALU opcode f0",
465 .result = REJECT,
466 },
467 {
468 "junk insn5",
469 .insns = {
470 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
471 BPF_EXIT_INSN(),
472 },
473 .errstr = "BPF_ALU uses reserved fields",
474 .result = REJECT,
475 },
476 {
477 "misaligned read from stack",
478 .insns = {
479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
480 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
481 BPF_EXIT_INSN(),
482 },
483 .errstr = "misaligned access",
484 .result = REJECT,
485 },
486 {
487 "invalid map_fd for function call",
488 .insns = {
489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
490 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
492 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
494 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700495 BPF_EXIT_INSN(),
496 },
497 .errstr = "fd 0 is not pointing to valid bpf_map",
498 .result = REJECT,
499 },
500 {
501 "don't check return value before access",
502 .insns = {
503 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
504 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
506 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
508 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700509 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
510 BPF_EXIT_INSN(),
511 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200512 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700513 .errstr = "R0 invalid mem access 'map_value_or_null'",
514 .result = REJECT,
515 },
516 {
517 "access memory with incorrect alignment",
518 .insns = {
519 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
522 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
524 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
526 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
527 BPF_EXIT_INSN(),
528 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200529 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700530 .errstr = "misaligned access",
531 .result = REJECT,
532 },
533 {
534 "sometimes access memory with incorrect alignment",
535 .insns = {
536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
539 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
541 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
543 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
544 BPF_EXIT_INSN(),
545 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
546 BPF_EXIT_INSN(),
547 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200548 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700549 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700550 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700551 .result = REJECT,
552 },
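	/* The "jump test" series stresses branch handling with long chains
	 * of conditional jumps keyed off R1 (the context pointer).  All are
	 * accepted when privileged; unprivileged loading fails because
	 * comparing a pointer against an immediate is not allowed there.
	 */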
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700553 {
554 "jump test 1",
555 .insns = {
556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
557 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
559 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
561 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
562 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
563 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
565 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
566 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
567 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
568 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
569 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
570 BPF_MOV64_IMM(BPF_REG_0, 0),
571 BPF_EXIT_INSN(),
572 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700573 .errstr_unpriv = "R1 pointer comparison",
574 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700575 .result = ACCEPT,
576 },
577 {
578 "jump test 2",
579 .insns = {
580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
582 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
583 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
585 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
586 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
588 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
589 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
590 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
591 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
592 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
594 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
595 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
597 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
598 BPF_MOV64_IMM(BPF_REG_0, 0),
599 BPF_EXIT_INSN(),
600 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700601 .errstr_unpriv = "R1 pointer comparison",
602 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700603 .result = ACCEPT,
604 },
605 {
606 "jump test 3",
607 .insns = {
608 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
610 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
612 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
614 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
616 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
618 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
620 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
622 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
624 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
626 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
628 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
630 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
632 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
634 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700635 BPF_EXIT_INSN(),
636 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200637 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700638 .errstr_unpriv = "R1 pointer comparison",
639 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700640 .result = ACCEPT,
641 },
642 {
643 "jump test 4",
644 .insns = {
645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
646 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
651 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
653 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
654 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
656 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
658 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
659 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
672 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
674 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
685 BPF_MOV64_IMM(BPF_REG_0, 0),
686 BPF_EXIT_INSN(),
687 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700688 .errstr_unpriv = "R1 pointer comparison",
689 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700690 .result = ACCEPT,
691 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700692 {
693 "jump test 5",
694 .insns = {
695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
696 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
697 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
698 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
699 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
700 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
701 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
702 BPF_MOV64_IMM(BPF_REG_0, 0),
703 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
704 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
705 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
706 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
707 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
708 BPF_MOV64_IMM(BPF_REG_0, 0),
709 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
710 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
711 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
712 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
713 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
714 BPF_MOV64_IMM(BPF_REG_0, 0),
715 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
716 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
717 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
718 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
719 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
720 BPF_MOV64_IMM(BPF_REG_0, 0),
721 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
722 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
723 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
724 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
725 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
726 BPF_MOV64_IMM(BPF_REG_0, 0),
727 BPF_EXIT_INSN(),
728 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700729 .errstr_unpriv = "R1 pointer comparison",
730 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700731 .result = ACCEPT,
732 },
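	/* Context (struct __sk_buff) accesses: the verifier only allows
	 * loads and stores at offsets it knows how to rewrite for the given
	 * program type; anything else is "invalid bpf_context access".
	 */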
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700733 {
734 "access skb fields ok",
735 .insns = {
736 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
737 offsetof(struct __sk_buff, len)),
738 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
740 offsetof(struct __sk_buff, mark)),
741 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
742 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
743 offsetof(struct __sk_buff, pkt_type)),
744 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
745 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
746 offsetof(struct __sk_buff, queue_mapping)),
747 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -0700748 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
749 offsetof(struct __sk_buff, protocol)),
750 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
751 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
752 offsetof(struct __sk_buff, vlan_present)),
753 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
754 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
755 offsetof(struct __sk_buff, vlan_tci)),
756 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700757 BPF_EXIT_INSN(),
758 },
759 .result = ACCEPT,
760 },
761 {
762 "access skb fields bad1",
763 .insns = {
764 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
765 BPF_EXIT_INSN(),
766 },
767 .errstr = "invalid bpf_context access",
768 .result = REJECT,
769 },
770 {
771 "access skb fields bad2",
772 .insns = {
773 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
777 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
779 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700780 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
781 BPF_EXIT_INSN(),
782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
784 offsetof(struct __sk_buff, pkt_type)),
785 BPF_EXIT_INSN(),
786 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200787 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700788 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700789 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700790 .result = REJECT,
791 },
792 {
793 "access skb fields bad3",
794 .insns = {
795 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
797 offsetof(struct __sk_buff, pkt_type)),
798 BPF_EXIT_INSN(),
799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
802 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
804 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700805 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
806 BPF_EXIT_INSN(),
807 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
808 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
809 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200810 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700811 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700812 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700813 .result = REJECT,
814 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700815 {
816 "access skb fields bad4",
817 .insns = {
818 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
819 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
820 offsetof(struct __sk_buff, len)),
821 BPF_MOV64_IMM(BPF_REG_0, 0),
822 BPF_EXIT_INSN(),
823 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
824 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
826 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
828 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700829 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
830 BPF_EXIT_INSN(),
831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
832 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
833 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200834 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700835 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700836 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700837 .result = REJECT,
838 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700839 {
840 "check skb->mark is not writeable by sockets",
841 .insns = {
842 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
843 offsetof(struct __sk_buff, mark)),
844 BPF_EXIT_INSN(),
845 },
846 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700847 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700848 .result = REJECT,
849 },
850 {
851 "check skb->tc_index is not writeable by sockets",
852 .insns = {
853 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
854 offsetof(struct __sk_buff, tc_index)),
855 BPF_EXIT_INSN(),
856 },
857 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700858 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700859 .result = REJECT,
860 },
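	/* cb[0..4] is the 20 byte scratch area in struct __sk_buff.  The
	 * following battery checks byte/half/word/double word access to it,
	 * including alignment and out-of-bounds offsets, and that a program
	 * type without cb access (BPF_PROG_TYPE_CGROUP_SOCK here) is
	 * rejected.
	 */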
861 {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100862 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700863 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100864 BPF_MOV64_IMM(BPF_REG_0, 0),
865 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
866 offsetof(struct __sk_buff, cb[0])),
867 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
868 offsetof(struct __sk_buff, cb[0]) + 1),
869 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
870 offsetof(struct __sk_buff, cb[0]) + 2),
871 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
872 offsetof(struct __sk_buff, cb[0]) + 3),
873 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
874 offsetof(struct __sk_buff, cb[1])),
875 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
876 offsetof(struct __sk_buff, cb[1]) + 1),
877 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
878 offsetof(struct __sk_buff, cb[1]) + 2),
879 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
880 offsetof(struct __sk_buff, cb[1]) + 3),
881 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
882 offsetof(struct __sk_buff, cb[2])),
883 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
884 offsetof(struct __sk_buff, cb[2]) + 1),
885 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
886 offsetof(struct __sk_buff, cb[2]) + 2),
887 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
888 offsetof(struct __sk_buff, cb[2]) + 3),
889 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
890 offsetof(struct __sk_buff, cb[3])),
891 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
892 offsetof(struct __sk_buff, cb[3]) + 1),
893 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
894 offsetof(struct __sk_buff, cb[3]) + 2),
895 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
896 offsetof(struct __sk_buff, cb[3]) + 3),
897 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
898 offsetof(struct __sk_buff, cb[4])),
899 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
900 offsetof(struct __sk_buff, cb[4]) + 1),
901 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
902 offsetof(struct __sk_buff, cb[4]) + 2),
903 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
904 offsetof(struct __sk_buff, cb[4]) + 3),
905 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
906 offsetof(struct __sk_buff, cb[0])),
907 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
908 offsetof(struct __sk_buff, cb[0]) + 1),
909 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, cb[0]) + 2),
911 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
912 offsetof(struct __sk_buff, cb[0]) + 3),
913 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
914 offsetof(struct __sk_buff, cb[1])),
915 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
916 offsetof(struct __sk_buff, cb[1]) + 1),
917 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
918 offsetof(struct __sk_buff, cb[1]) + 2),
919 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
920 offsetof(struct __sk_buff, cb[1]) + 3),
921 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
922 offsetof(struct __sk_buff, cb[2])),
923 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
924 offsetof(struct __sk_buff, cb[2]) + 1),
925 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
926 offsetof(struct __sk_buff, cb[2]) + 2),
927 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
928 offsetof(struct __sk_buff, cb[2]) + 3),
929 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
930 offsetof(struct __sk_buff, cb[3])),
931 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
932 offsetof(struct __sk_buff, cb[3]) + 1),
933 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
934 offsetof(struct __sk_buff, cb[3]) + 2),
935 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
936 offsetof(struct __sk_buff, cb[3]) + 3),
937 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
938 offsetof(struct __sk_buff, cb[4])),
939 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
940 offsetof(struct __sk_buff, cb[4]) + 1),
941 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
942 offsetof(struct __sk_buff, cb[4]) + 2),
943 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
944 offsetof(struct __sk_buff, cb[4]) + 3),
945 BPF_EXIT_INSN(),
946 },
947 .result = ACCEPT,
948 },
949 {
950 "check cb access: byte, oob 1",
951 .insns = {
952 BPF_MOV64_IMM(BPF_REG_0, 0),
953 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
954 offsetof(struct __sk_buff, cb[4]) + 4),
955 BPF_EXIT_INSN(),
956 },
957 .errstr = "invalid bpf_context access",
958 .result = REJECT,
959 },
960 {
961 "check cb access: byte, oob 2",
962 .insns = {
963 BPF_MOV64_IMM(BPF_REG_0, 0),
964 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
965 offsetof(struct __sk_buff, cb[0]) - 1),
966 BPF_EXIT_INSN(),
967 },
968 .errstr = "invalid bpf_context access",
969 .result = REJECT,
970 },
971 {
972 "check cb access: byte, oob 3",
973 .insns = {
974 BPF_MOV64_IMM(BPF_REG_0, 0),
975 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
976 offsetof(struct __sk_buff, cb[4]) + 4),
977 BPF_EXIT_INSN(),
978 },
979 .errstr = "invalid bpf_context access",
980 .result = REJECT,
981 },
982 {
983 "check cb access: byte, oob 4",
984 .insns = {
985 BPF_MOV64_IMM(BPF_REG_0, 0),
986 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
987 offsetof(struct __sk_buff, cb[0]) - 1),
988 BPF_EXIT_INSN(),
989 },
990 .errstr = "invalid bpf_context access",
991 .result = REJECT,
992 },
993 {
994 "check cb access: byte, wrong type",
995 .insns = {
996 BPF_MOV64_IMM(BPF_REG_0, 0),
997 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700998 offsetof(struct __sk_buff, cb[0])),
999 BPF_EXIT_INSN(),
1000 },
1001 .errstr = "invalid bpf_context access",
1002 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001003 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1004 },
1005 {
1006 "check cb access: half",
1007 .insns = {
1008 BPF_MOV64_IMM(BPF_REG_0, 0),
1009 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1010 offsetof(struct __sk_buff, cb[0])),
1011 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1012 offsetof(struct __sk_buff, cb[0]) + 2),
1013 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1014 offsetof(struct __sk_buff, cb[1])),
1015 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1016 offsetof(struct __sk_buff, cb[1]) + 2),
1017 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1018 offsetof(struct __sk_buff, cb[2])),
1019 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1020 offsetof(struct __sk_buff, cb[2]) + 2),
1021 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1022 offsetof(struct __sk_buff, cb[3])),
1023 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1024 offsetof(struct __sk_buff, cb[3]) + 2),
1025 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1026 offsetof(struct __sk_buff, cb[4])),
1027 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1028 offsetof(struct __sk_buff, cb[4]) + 2),
1029 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1030 offsetof(struct __sk_buff, cb[0])),
1031 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1032 offsetof(struct __sk_buff, cb[0]) + 2),
1033 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1034 offsetof(struct __sk_buff, cb[1])),
1035 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1036 offsetof(struct __sk_buff, cb[1]) + 2),
1037 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1038 offsetof(struct __sk_buff, cb[2])),
1039 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1040 offsetof(struct __sk_buff, cb[2]) + 2),
1041 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, cb[3])),
1043 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1044 offsetof(struct __sk_buff, cb[3]) + 2),
1045 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1046 offsetof(struct __sk_buff, cb[4])),
1047 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1048 offsetof(struct __sk_buff, cb[4]) + 2),
1049 BPF_EXIT_INSN(),
1050 },
1051 .result = ACCEPT,
1052 },
1053 {
1054 "check cb access: half, unaligned",
1055 .insns = {
1056 BPF_MOV64_IMM(BPF_REG_0, 0),
1057 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1058 offsetof(struct __sk_buff, cb[0]) + 1),
1059 BPF_EXIT_INSN(),
1060 },
1061 .errstr = "misaligned access",
1062 .result = REJECT,
1063 },
1064 {
1065 "check cb access: half, oob 1",
1066 .insns = {
1067 BPF_MOV64_IMM(BPF_REG_0, 0),
1068 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1069 offsetof(struct __sk_buff, cb[4]) + 4),
1070 BPF_EXIT_INSN(),
1071 },
1072 .errstr = "invalid bpf_context access",
1073 .result = REJECT,
1074 },
1075 {
1076 "check cb access: half, oob 2",
1077 .insns = {
1078 BPF_MOV64_IMM(BPF_REG_0, 0),
1079 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1080 offsetof(struct __sk_buff, cb[0]) - 2),
1081 BPF_EXIT_INSN(),
1082 },
1083 .errstr = "invalid bpf_context access",
1084 .result = REJECT,
1085 },
1086 {
1087 "check cb access: half, oob 3",
1088 .insns = {
1089 BPF_MOV64_IMM(BPF_REG_0, 0),
1090 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1091 offsetof(struct __sk_buff, cb[4]) + 4),
1092 BPF_EXIT_INSN(),
1093 },
1094 .errstr = "invalid bpf_context access",
1095 .result = REJECT,
1096 },
1097 {
1098 "check cb access: half, oob 4",
1099 .insns = {
1100 BPF_MOV64_IMM(BPF_REG_0, 0),
1101 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1102 offsetof(struct __sk_buff, cb[0]) - 2),
1103 BPF_EXIT_INSN(),
1104 },
1105 .errstr = "invalid bpf_context access",
1106 .result = REJECT,
1107 },
1108 {
1109 "check cb access: half, wrong type",
1110 .insns = {
1111 BPF_MOV64_IMM(BPF_REG_0, 0),
1112 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1113 offsetof(struct __sk_buff, cb[0])),
1114 BPF_EXIT_INSN(),
1115 },
1116 .errstr = "invalid bpf_context access",
1117 .result = REJECT,
1118 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1119 },
1120 {
1121 "check cb access: word",
1122 .insns = {
1123 BPF_MOV64_IMM(BPF_REG_0, 0),
1124 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1125 offsetof(struct __sk_buff, cb[0])),
1126 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1127 offsetof(struct __sk_buff, cb[1])),
1128 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1129 offsetof(struct __sk_buff, cb[2])),
1130 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1131 offsetof(struct __sk_buff, cb[3])),
1132 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1133 offsetof(struct __sk_buff, cb[4])),
1134 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1135 offsetof(struct __sk_buff, cb[0])),
1136 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1137 offsetof(struct __sk_buff, cb[1])),
1138 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1139 offsetof(struct __sk_buff, cb[2])),
1140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1141 offsetof(struct __sk_buff, cb[3])),
1142 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1143 offsetof(struct __sk_buff, cb[4])),
1144 BPF_EXIT_INSN(),
1145 },
1146 .result = ACCEPT,
1147 },
1148 {
1149 "check cb access: word, unaligned 1",
1150 .insns = {
1151 BPF_MOV64_IMM(BPF_REG_0, 0),
1152 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1153 offsetof(struct __sk_buff, cb[0]) + 2),
1154 BPF_EXIT_INSN(),
1155 },
1156 .errstr = "misaligned access",
1157 .result = REJECT,
1158 },
1159 {
1160 "check cb access: word, unaligned 2",
1161 .insns = {
1162 BPF_MOV64_IMM(BPF_REG_0, 0),
1163 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1164 offsetof(struct __sk_buff, cb[4]) + 1),
1165 BPF_EXIT_INSN(),
1166 },
1167 .errstr = "misaligned access",
1168 .result = REJECT,
1169 },
1170 {
1171 "check cb access: word, unaligned 3",
1172 .insns = {
1173 BPF_MOV64_IMM(BPF_REG_0, 0),
1174 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1175 offsetof(struct __sk_buff, cb[4]) + 2),
1176 BPF_EXIT_INSN(),
1177 },
1178 .errstr = "misaligned access",
1179 .result = REJECT,
1180 },
1181 {
1182 "check cb access: word, unaligned 4",
1183 .insns = {
1184 BPF_MOV64_IMM(BPF_REG_0, 0),
1185 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1186 offsetof(struct __sk_buff, cb[4]) + 3),
1187 BPF_EXIT_INSN(),
1188 },
1189 .errstr = "misaligned access",
1190 .result = REJECT,
1191 },
1192 {
1193 "check cb access: double",
1194 .insns = {
1195 BPF_MOV64_IMM(BPF_REG_0, 0),
1196 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1197 offsetof(struct __sk_buff, cb[0])),
1198 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1199 offsetof(struct __sk_buff, cb[2])),
1200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1201 offsetof(struct __sk_buff, cb[0])),
1202 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1203 offsetof(struct __sk_buff, cb[2])),
1204 BPF_EXIT_INSN(),
1205 },
1206 .result = ACCEPT,
1207 },
1208 {
1209 "check cb access: double, unaligned 1",
1210 .insns = {
1211 BPF_MOV64_IMM(BPF_REG_0, 0),
1212 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1213 offsetof(struct __sk_buff, cb[1])),
1214 BPF_EXIT_INSN(),
1215 },
1216 .errstr = "misaligned access",
1217 .result = REJECT,
1218 },
1219 {
1220 "check cb access: double, unaligned 2",
1221 .insns = {
1222 BPF_MOV64_IMM(BPF_REG_0, 0),
1223 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1224 offsetof(struct __sk_buff, cb[3])),
1225 BPF_EXIT_INSN(),
1226 },
1227 .errstr = "misaligned access",
1228 .result = REJECT,
1229 },
1230 {
1231 "check cb access: double, oob 1",
1232 .insns = {
1233 BPF_MOV64_IMM(BPF_REG_0, 0),
1234 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1235 offsetof(struct __sk_buff, cb[4])),
1236 BPF_EXIT_INSN(),
1237 },
1238 .errstr = "invalid bpf_context access",
1239 .result = REJECT,
1240 },
1241 {
1242 "check cb access: double, oob 2",
1243 .insns = {
1244 BPF_MOV64_IMM(BPF_REG_0, 0),
1245 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1246 offsetof(struct __sk_buff, cb[4]) + 8),
1247 BPF_EXIT_INSN(),
1248 },
1249 .errstr = "invalid bpf_context access",
1250 .result = REJECT,
1251 },
1252 {
1253 "check cb access: double, oob 3",
1254 .insns = {
1255 BPF_MOV64_IMM(BPF_REG_0, 0),
1256 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1257 offsetof(struct __sk_buff, cb[0]) - 8),
1258 BPF_EXIT_INSN(),
1259 },
1260 .errstr = "invalid bpf_context access",
1261 .result = REJECT,
1262 },
1263 {
1264 "check cb access: double, oob 4",
1265 .insns = {
1266 BPF_MOV64_IMM(BPF_REG_0, 0),
1267 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1268 offsetof(struct __sk_buff, cb[4])),
1269 BPF_EXIT_INSN(),
1270 },
1271 .errstr = "invalid bpf_context access",
1272 .result = REJECT,
1273 },
1274 {
1275 "check cb access: double, oob 5",
1276 .insns = {
1277 BPF_MOV64_IMM(BPF_REG_0, 0),
1278 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1279 offsetof(struct __sk_buff, cb[4]) + 8),
1280 BPF_EXIT_INSN(),
1281 },
1282 .errstr = "invalid bpf_context access",
1283 .result = REJECT,
1284 },
1285 {
1286 "check cb access: double, oob 6",
1287 .insns = {
1288 BPF_MOV64_IMM(BPF_REG_0, 0),
1289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1290 offsetof(struct __sk_buff, cb[0]) - 8),
1291 BPF_EXIT_INSN(),
1292 },
1293 .errstr = "invalid bpf_context access",
1294 .result = REJECT,
1295 },
1296 {
1297 "check cb access: double, wrong type",
1298 .insns = {
1299 BPF_MOV64_IMM(BPF_REG_0, 0),
1300 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1301 offsetof(struct __sk_buff, cb[0])),
1302 BPF_EXIT_INSN(),
1303 },
1304 .errstr = "invalid bpf_context access",
1305 .result = REJECT,
1306 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001307 },
1308 {
1309 "check out of range skb->cb access",
1310 .insns = {
1311 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001312 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001313 BPF_EXIT_INSN(),
1314 },
1315 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001316 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001317 .result = REJECT,
1318 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1319 },
1320 {
1321 "write skb fields from socket prog",
1322 .insns = {
1323 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1324 offsetof(struct __sk_buff, cb[4])),
1325 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1326 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1327 offsetof(struct __sk_buff, mark)),
1328 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1329 offsetof(struct __sk_buff, tc_index)),
1330 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1331 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1332 offsetof(struct __sk_buff, cb[0])),
1333 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1334 offsetof(struct __sk_buff, cb[2])),
1335 BPF_EXIT_INSN(),
1336 },
1337 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001338 .errstr_unpriv = "R1 leaks addr",
1339 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001340 },
1341 {
1342 "write skb fields from tc_cls_act prog",
1343 .insns = {
1344 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 offsetof(struct __sk_buff, cb[0])),
1346 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1347 offsetof(struct __sk_buff, mark)),
1348 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, tc_index)),
1350 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1351 offsetof(struct __sk_buff, tc_index)),
1352 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1353 offsetof(struct __sk_buff, cb[3])),
1354 BPF_EXIT_INSN(),
1355 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001356 .errstr_unpriv = "",
1357 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001358 .result = ACCEPT,
1359 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1360 },
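	/* PTR_TO_STACK: direct stores/loads through a pointer derived from
	 * the frame pointer must stay inside the stack and be naturally
	 * aligned for the access size.
	 */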
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001361 {
1362 "PTR_TO_STACK store/load",
1363 .insns = {
1364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1366 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1367 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1368 BPF_EXIT_INSN(),
1369 },
1370 .result = ACCEPT,
1371 },
1372 {
1373 "PTR_TO_STACK store/load - bad alignment on off",
1374 .insns = {
1375 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1377 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1378 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1379 BPF_EXIT_INSN(),
1380 },
1381 .result = REJECT,
1382 .errstr = "misaligned access off -6 size 8",
1383 },
1384 {
1385 "PTR_TO_STACK store/load - bad alignment on reg",
1386 .insns = {
1387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1389 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1390 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1391 BPF_EXIT_INSN(),
1392 },
1393 .result = REJECT,
1394 .errstr = "misaligned access off -2 size 8",
1395 },
1396 {
1397 "PTR_TO_STACK store/load - out of bounds low",
1398 .insns = {
1399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1401 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1402 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1403 BPF_EXIT_INSN(),
1404 },
1405 .result = REJECT,
1406 .errstr = "invalid stack off=-79992 size=8",
1407 },
1408 {
1409 "PTR_TO_STACK store/load - out of bounds high",
1410 .insns = {
1411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1413 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1414 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1415 BPF_EXIT_INSN(),
1416 },
1417 .result = REJECT,
1418 .errstr = "invalid stack off=0 size=8",
1419 },
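	/* The "unpriv:" tests exercise the extra restrictions applied when a
	 * program is loaded without CAP_SYS_ADMIN: no leaking of kernel
	 * pointers, no pointer arithmetic or comparisons, and no access to
	 * tracing helpers such as bpf_trace_printk.
	 */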
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001420 {
1421 "unpriv: return pointer",
1422 .insns = {
1423 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1424 BPF_EXIT_INSN(),
1425 },
1426 .result = ACCEPT,
1427 .result_unpriv = REJECT,
1428 .errstr_unpriv = "R0 leaks addr",
1429 },
1430 {
1431 "unpriv: add const to pointer",
1432 .insns = {
1433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1434 BPF_MOV64_IMM(BPF_REG_0, 0),
1435 BPF_EXIT_INSN(),
1436 },
1437 .result = ACCEPT,
1438 .result_unpriv = REJECT,
1439 .errstr_unpriv = "R1 pointer arithmetic",
1440 },
1441 {
1442 "unpriv: add pointer to pointer",
1443 .insns = {
1444 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1445 BPF_MOV64_IMM(BPF_REG_0, 0),
1446 BPF_EXIT_INSN(),
1447 },
1448 .result = ACCEPT,
1449 .result_unpriv = REJECT,
1450 .errstr_unpriv = "R1 pointer arithmetic",
1451 },
1452 {
1453 "unpriv: neg pointer",
1454 .insns = {
1455 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1456 BPF_MOV64_IMM(BPF_REG_0, 0),
1457 BPF_EXIT_INSN(),
1458 },
1459 .result = ACCEPT,
1460 .result_unpriv = REJECT,
1461 .errstr_unpriv = "R1 pointer arithmetic",
1462 },
1463 {
1464 "unpriv: cmp pointer with const",
1465 .insns = {
1466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1467 BPF_MOV64_IMM(BPF_REG_0, 0),
1468 BPF_EXIT_INSN(),
1469 },
1470 .result = ACCEPT,
1471 .result_unpriv = REJECT,
1472 .errstr_unpriv = "R1 pointer comparison",
1473 },
1474 {
1475 "unpriv: cmp pointer with pointer",
1476 .insns = {
1477 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1478 BPF_MOV64_IMM(BPF_REG_0, 0),
1479 BPF_EXIT_INSN(),
1480 },
1481 .result = ACCEPT,
1482 .result_unpriv = REJECT,
1483 .errstr_unpriv = "R10 pointer comparison",
1484 },
1485 {
1486 "unpriv: check that printk is disallowed",
1487 .insns = {
1488 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1491 BPF_MOV64_IMM(BPF_REG_2, 8),
1492 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1494 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001495 BPF_MOV64_IMM(BPF_REG_0, 0),
1496 BPF_EXIT_INSN(),
1497 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001498 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001499 .result_unpriv = REJECT,
1500 .result = ACCEPT,
1501 },
1502 {
1503 "unpriv: pass pointer to helper function",
1504 .insns = {
1505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1508 BPF_LD_MAP_FD(BPF_REG_1, 0),
1509 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1510 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1512 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001513 BPF_MOV64_IMM(BPF_REG_0, 0),
1514 BPF_EXIT_INSN(),
1515 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001516 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001517 .errstr_unpriv = "R4 leaks addr",
1518 .result_unpriv = REJECT,
1519 .result = ACCEPT,
1520 },
1521 {
1522 "unpriv: indirectly pass pointer on stack to helper function",
1523 .insns = {
1524 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1525 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1527 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001528 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1529 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001530 BPF_MOV64_IMM(BPF_REG_0, 0),
1531 BPF_EXIT_INSN(),
1532 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001533 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001534 .errstr = "invalid indirect read from stack off -8+0 size 8",
1535 .result = REJECT,
1536 },
1537 {
1538 "unpriv: mangle pointer on stack 1",
1539 .insns = {
1540 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1541 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1542 BPF_MOV64_IMM(BPF_REG_0, 0),
1543 BPF_EXIT_INSN(),
1544 },
1545 .errstr_unpriv = "attempt to corrupt spilled",
1546 .result_unpriv = REJECT,
1547 .result = ACCEPT,
1548 },
1549 {
1550 "unpriv: mangle pointer on stack 2",
1551 .insns = {
1552 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1553 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1554 BPF_MOV64_IMM(BPF_REG_0, 0),
1555 BPF_EXIT_INSN(),
1556 },
1557 .errstr_unpriv = "attempt to corrupt spilled",
1558 .result_unpriv = REJECT,
1559 .result = ACCEPT,
1560 },
1561 {
1562 "unpriv: read pointer from stack in small chunks",
1563 .insns = {
1564 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1565 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1566 BPF_MOV64_IMM(BPF_REG_0, 0),
1567 BPF_EXIT_INSN(),
1568 },
1569 .errstr = "invalid size",
1570 .result = REJECT,
1571 },
1572 {
1573 "unpriv: write pointer into ctx",
1574 .insns = {
1575 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1576 BPF_MOV64_IMM(BPF_REG_0, 0),
1577 BPF_EXIT_INSN(),
1578 },
1579 .errstr_unpriv = "R1 leaks addr",
1580 .result_unpriv = REJECT,
1581 .errstr = "invalid bpf_context access",
1582 .result = REJECT,
1583 },
1584 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001585 "unpriv: spill/fill of ctx",
1586 .insns = {
1587 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1589 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1590 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1591 BPF_MOV64_IMM(BPF_REG_0, 0),
1592 BPF_EXIT_INSN(),
1593 },
1594 .result = ACCEPT,
1595 },
1596 {
1597 "unpriv: spill/fill of ctx 2",
1598 .insns = {
1599 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1601 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1602 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1604 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001605 BPF_EXIT_INSN(),
1606 },
1607 .result = ACCEPT,
1608 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1609 },
1610 {
1611 "unpriv: spill/fill of ctx 3",
1612 .insns = {
1613 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1615 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1616 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1617 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1619 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001620 BPF_EXIT_INSN(),
1621 },
1622 .result = REJECT,
1623 .errstr = "R1 type=fp expected=ctx",
1624 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1625 },
1626 {
1627 "unpriv: spill/fill of ctx 4",
1628 .insns = {
1629 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1631 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1632 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001633 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1634 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001635 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1637 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001638 BPF_EXIT_INSN(),
1639 },
1640 .result = REJECT,
1641 .errstr = "R1 type=inv expected=ctx",
1642 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1643 },
1644 {
1645 "unpriv: spill/fill of different pointers stx",
1646 .insns = {
1647 BPF_MOV64_IMM(BPF_REG_3, 42),
1648 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1653 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1654 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1655 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1656 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1657 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1658 offsetof(struct __sk_buff, mark)),
1659 BPF_MOV64_IMM(BPF_REG_0, 0),
1660 BPF_EXIT_INSN(),
1661 },
1662 .result = REJECT,
1663 .errstr = "same insn cannot be used with different pointers",
1664 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1665 },
1666 {
1667 "unpriv: spill/fill of different pointers ldx",
1668 .insns = {
1669 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1672 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1674 -(__s32)offsetof(struct bpf_perf_event_data,
1675 sample_period) - 8),
1676 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1677 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1678 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1679 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1680 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1681 offsetof(struct bpf_perf_event_data,
1682 sample_period)),
1683 BPF_MOV64_IMM(BPF_REG_0, 0),
1684 BPF_EXIT_INSN(),
1685 },
1686 .result = REJECT,
1687 .errstr = "same insn cannot be used with different pointers",
1688 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1689 },
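	/* The remaining unpriv tests cover operations that could leak kernel
	 * addresses (storing pointers into map values, passing them to
	 * tail_call, pointer comparisons and arithmetic): these are rejected
	 * only for unprivileged loaders, privileged ones still pass.
	 */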
1690 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001691 "unpriv: write pointer into map elem value",
1692 .insns = {
1693 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1694 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1696 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1698 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001699 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1700 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1701 BPF_EXIT_INSN(),
1702 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001703 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001704 .errstr_unpriv = "R0 leaks addr",
1705 .result_unpriv = REJECT,
1706 .result = ACCEPT,
1707 },
1708 {
1709 "unpriv: partial copy of pointer",
1710 .insns = {
1711 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1712 BPF_MOV64_IMM(BPF_REG_0, 0),
1713 BPF_EXIT_INSN(),
1714 },
1715 .errstr_unpriv = "R10 partial copy",
1716 .result_unpriv = REJECT,
1717 .result = ACCEPT,
1718 },
1719 {
1720 "unpriv: pass pointer to tail_call",
1721 .insns = {
1722 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1723 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001724 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1725 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001726 BPF_MOV64_IMM(BPF_REG_0, 0),
1727 BPF_EXIT_INSN(),
1728 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001729 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001730 .errstr_unpriv = "R3 leaks addr into helper",
1731 .result_unpriv = REJECT,
1732 .result = ACCEPT,
1733 },
1734 {
1735 "unpriv: cmp map pointer with zero",
1736 .insns = {
1737 BPF_MOV64_IMM(BPF_REG_1, 0),
1738 BPF_LD_MAP_FD(BPF_REG_1, 0),
1739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1740 BPF_MOV64_IMM(BPF_REG_0, 0),
1741 BPF_EXIT_INSN(),
1742 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001743 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001744 .errstr_unpriv = "R1 pointer comparison",
1745 .result_unpriv = REJECT,
1746 .result = ACCEPT,
1747 },
1748 {
1749 "unpriv: write into frame pointer",
1750 .insns = {
1751 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1752 BPF_MOV64_IMM(BPF_REG_0, 0),
1753 BPF_EXIT_INSN(),
1754 },
1755 .errstr = "frame pointer is read only",
1756 .result = REJECT,
1757 },
1758 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001759 "unpriv: spill/fill frame pointer",
1760 .insns = {
1761 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1763 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1764 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1765 BPF_MOV64_IMM(BPF_REG_0, 0),
1766 BPF_EXIT_INSN(),
1767 },
1768 .errstr = "frame pointer is read only",
1769 .result = REJECT,
1770 },
1771 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001772 "unpriv: cmp of frame pointer",
1773 .insns = {
1774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1775 BPF_MOV64_IMM(BPF_REG_0, 0),
1776 BPF_EXIT_INSN(),
1777 },
1778 .errstr_unpriv = "R10 pointer comparison",
1779 .result_unpriv = REJECT,
1780 .result = ACCEPT,
1781 },
1782 {
1783 "unpriv: cmp of stack pointer",
1784 .insns = {
1785 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1788 BPF_MOV64_IMM(BPF_REG_0, 0),
1789 BPF_EXIT_INSN(),
1790 },
1791 .errstr_unpriv = "R2 pointer comparison",
1792 .result_unpriv = REJECT,
1793 .result = ACCEPT,
1794 },
1795 {
1796 "unpriv: obfuscate stack pointer",
1797 .insns = {
1798 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1801 BPF_MOV64_IMM(BPF_REG_0, 0),
1802 BPF_EXIT_INSN(),
1803 },
1804 .errstr_unpriv = "R2 pointer arithmetic",
1805 .result_unpriv = REJECT,
1806 .result = ACCEPT,
1807 },
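	/* raw_stack tests: bpf_skb_load_bytes() takes a buffer on the program
	 * stack as destination. The buffer does not have to be initialized
	 * before the call, but it must lie completely inside the 512 byte
	 * stack window below the frame pointer and have a sane, non-zero
	 * length.
	 */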
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001808 {
1809 "raw_stack: no skb_load_bytes",
1810 .insns = {
1811 BPF_MOV64_IMM(BPF_REG_2, 4),
1812 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1814 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1815 BPF_MOV64_IMM(BPF_REG_4, 8),
1816 /* Call to skb_load_bytes() omitted. */
1817 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1818 BPF_EXIT_INSN(),
1819 },
1820 .result = REJECT,
1821 .errstr = "invalid read from stack off -8+0 size 8",
1822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1823 },
1824 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001825 "raw_stack: skb_load_bytes, negative len",
1826 .insns = {
1827 BPF_MOV64_IMM(BPF_REG_2, 4),
1828 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1830 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1831 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1833 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001834 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1835 BPF_EXIT_INSN(),
1836 },
1837 .result = REJECT,
1838 .errstr = "invalid stack type R3",
1839 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1840 },
1841 {
1842 "raw_stack: skb_load_bytes, negative len 2",
1843 .insns = {
1844 BPF_MOV64_IMM(BPF_REG_2, 4),
1845 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1847 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1848 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1850 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001851 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1852 BPF_EXIT_INSN(),
1853 },
1854 .result = REJECT,
1855 .errstr = "invalid stack type R3",
1856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1857 },
1858 {
1859 "raw_stack: skb_load_bytes, zero len",
1860 .insns = {
1861 BPF_MOV64_IMM(BPF_REG_2, 4),
1862 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1864 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1865 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1867 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001868 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1869 BPF_EXIT_INSN(),
1870 },
1871 .result = REJECT,
1872 .errstr = "invalid stack type R3",
1873 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1874 },
1875 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001876 "raw_stack: skb_load_bytes, no init",
1877 .insns = {
1878 BPF_MOV64_IMM(BPF_REG_2, 4),
1879 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1881 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1882 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1884 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001885 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1886 BPF_EXIT_INSN(),
1887 },
1888 .result = ACCEPT,
1889 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1890 },
1891 {
1892 "raw_stack: skb_load_bytes, init",
1893 .insns = {
1894 BPF_MOV64_IMM(BPF_REG_2, 4),
1895 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1897 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1898 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1899 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1901 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001902 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1903 BPF_EXIT_INSN(),
1904 },
1905 .result = ACCEPT,
1906 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1907 },
1908 {
1909 "raw_stack: skb_load_bytes, spilled regs around bounds",
1910 .insns = {
1911 BPF_MOV64_IMM(BPF_REG_2, 4),
1912 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001914 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1915 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001916 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1917 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1919 BPF_FUNC_skb_load_bytes),
1920 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1921 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001922 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1923 offsetof(struct __sk_buff, mark)),
1924 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1925 offsetof(struct __sk_buff, priority)),
1926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1927 BPF_EXIT_INSN(),
1928 },
1929 .result = ACCEPT,
1930 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1931 },
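	/* If spilled pointers fall inside the buffer handed to
	 * skb_load_bytes(), the helper may overwrite them, so the verifier
	 * marks those slots as unknown values and any later dereference of
	 * them has to be rejected.
	 */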
1932 {
1933 "raw_stack: skb_load_bytes, spilled regs corruption",
1934 .insns = {
1935 BPF_MOV64_IMM(BPF_REG_2, 4),
1936 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001938 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001939 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1940 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1942 BPF_FUNC_skb_load_bytes),
1943 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001944 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1945 offsetof(struct __sk_buff, mark)),
1946 BPF_EXIT_INSN(),
1947 },
1948 .result = REJECT,
1949 .errstr = "R0 invalid mem access 'inv'",
1950 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1951 },
1952 {
1953 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1954 .insns = {
1955 BPF_MOV64_IMM(BPF_REG_2, 4),
1956 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001958 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1959 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1960 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001961 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1962 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1964 BPF_FUNC_skb_load_bytes),
1965 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1966 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1967 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001968 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1969 offsetof(struct __sk_buff, mark)),
1970 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1971 offsetof(struct __sk_buff, priority)),
1972 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
1974 offsetof(struct __sk_buff, pkt_type)),
1975 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1976 BPF_EXIT_INSN(),
1977 },
1978 .result = REJECT,
1979 .errstr = "R3 invalid mem access 'inv'",
1980 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1981 },
1982 {
1983 "raw_stack: skb_load_bytes, spilled regs + data",
1984 .insns = {
1985 BPF_MOV64_IMM(BPF_REG_2, 4),
1986 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001988 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1989 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1990 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001991 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1992 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1994 BPF_FUNC_skb_load_bytes),
1995 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1996 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1997 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001998 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1999 offsetof(struct __sk_buff, mark)),
2000 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2001 offsetof(struct __sk_buff, priority)),
2002 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2003 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2004 BPF_EXIT_INSN(),
2005 },
2006 .result = ACCEPT,
2007 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2008 },
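	/* Bounds checks on the stack buffer passed to skb_load_bytes(): the
	 * buffer must fit between fp-512 and fp, and the requested length
	 * must be a sensible non-zero value.
	 */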
2009 {
2010 "raw_stack: skb_load_bytes, invalid access 1",
2011 .insns = {
2012 BPF_MOV64_IMM(BPF_REG_2, 4),
2013 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2015 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2016 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2018 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002019 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2020 BPF_EXIT_INSN(),
2021 },
2022 .result = REJECT,
2023 .errstr = "invalid stack type R3 off=-513 access_size=8",
2024 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2025 },
2026 {
2027 "raw_stack: skb_load_bytes, invalid access 2",
2028 .insns = {
2029 BPF_MOV64_IMM(BPF_REG_2, 4),
2030 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2032 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2033 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2035 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002036 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2037 BPF_EXIT_INSN(),
2038 },
2039 .result = REJECT,
2040 .errstr = "invalid stack type R3 off=-1 access_size=8",
2041 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2042 },
2043 {
2044 "raw_stack: skb_load_bytes, invalid access 3",
2045 .insns = {
2046 BPF_MOV64_IMM(BPF_REG_2, 4),
2047 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2049 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2050 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2052 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002053 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2054 BPF_EXIT_INSN(),
2055 },
2056 .result = REJECT,
2057 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2058 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2059 },
2060 {
2061 "raw_stack: skb_load_bytes, invalid access 4",
2062 .insns = {
2063 BPF_MOV64_IMM(BPF_REG_2, 4),
2064 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2066 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2067 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002068 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2069 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002070 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2071 BPF_EXIT_INSN(),
2072 },
2073 .result = REJECT,
2074 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2076 },
2077 {
2078 "raw_stack: skb_load_bytes, invalid access 5",
2079 .insns = {
2080 BPF_MOV64_IMM(BPF_REG_2, 4),
2081 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2083 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2084 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2086 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002087 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2088 BPF_EXIT_INSN(),
2089 },
2090 .result = REJECT,
2091 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2092 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2093 },
2094 {
2095 "raw_stack: skb_load_bytes, invalid access 6",
2096 .insns = {
2097 BPF_MOV64_IMM(BPF_REG_2, 4),
2098 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2100 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2101 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002102 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2103 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002104 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2105 BPF_EXIT_INSN(),
2106 },
2107 .result = REJECT,
2108 .errstr = "invalid stack type R3 off=-512 access_size=0",
2109 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2110 },
2111 {
2112 "raw_stack: skb_load_bytes, large access",
2113 .insns = {
2114 BPF_MOV64_IMM(BPF_REG_2, 4),
2115 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2117 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2118 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002119 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2120 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002121 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2122 BPF_EXIT_INSN(),
2123 },
2124 .result = ACCEPT,
2125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2126 },
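	/* Direct packet access: cls programs may dereference skb->data
	 * directly, but only after the end of the access has been compared
	 * against skb->data_end, so the verifier can prove that every load
	 * and store stays inside the packet.
	 */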
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002127 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002128 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002129 .insns = {
2130 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2131 offsetof(struct __sk_buff, data)),
2132 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2133 offsetof(struct __sk_buff, data_end)),
2134 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2135 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2136 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2137 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2138 BPF_MOV64_IMM(BPF_REG_0, 0),
2139 BPF_EXIT_INSN(),
2140 },
2141 .result = ACCEPT,
2142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2143 },
2144 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002145 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002146 .insns = {
2147 BPF_MOV64_IMM(BPF_REG_0, 1),
2148 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2149 offsetof(struct __sk_buff, data_end)),
2150 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2151 offsetof(struct __sk_buff, data)),
2152 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2154 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2155 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2156 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2157 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2158 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2159 offsetof(struct __sk_buff, data)),
2160 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2161 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2162 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2163 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2164 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2167 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2168 offsetof(struct __sk_buff, data_end)),
2169 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2170 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2171 BPF_MOV64_IMM(BPF_REG_0, 0),
2172 BPF_EXIT_INSN(),
2173 },
2174 .result = ACCEPT,
2175 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2176 },
2177 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002178 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002179 .insns = {
2180 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2181 offsetof(struct __sk_buff, data)),
2182 BPF_MOV64_IMM(BPF_REG_0, 0),
2183 BPF_EXIT_INSN(),
2184 },
2185 .errstr = "invalid bpf_context access off=76",
2186 .result = REJECT,
2187 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2188 },
2189 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002190 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002191 .insns = {
2192 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2193 offsetof(struct __sk_buff, data)),
2194 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2195 offsetof(struct __sk_buff, data_end)),
2196 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2198 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2199 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2200 BPF_MOV64_IMM(BPF_REG_0, 0),
2201 BPF_EXIT_INSN(),
2202 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002203 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002204 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2205 },
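	/* Tests 5-9 check the data_end comparison in both directions
	 * (reg > pkt_end as well as pkt_end >= reg): only the branch that
	 * was proven to be in bounds may touch the packet.
	 */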
Aaron Yue1633ac02016-08-11 18:17:17 -07002206 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002207 "direct packet access: test5 (pkt_end >= reg, good access)",
2208 .insns = {
2209 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2210 offsetof(struct __sk_buff, data)),
2211 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2212 offsetof(struct __sk_buff, data_end)),
2213 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2215 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2216 BPF_MOV64_IMM(BPF_REG_0, 1),
2217 BPF_EXIT_INSN(),
2218 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2219 BPF_MOV64_IMM(BPF_REG_0, 0),
2220 BPF_EXIT_INSN(),
2221 },
2222 .result = ACCEPT,
2223 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2224 },
2225 {
2226 "direct packet access: test6 (pkt_end >= reg, bad access)",
2227 .insns = {
2228 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2229 offsetof(struct __sk_buff, data)),
2230 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2231 offsetof(struct __sk_buff, data_end)),
2232 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2234 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2235 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2236 BPF_MOV64_IMM(BPF_REG_0, 1),
2237 BPF_EXIT_INSN(),
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
2241 .errstr = "invalid access to packet",
2242 .result = REJECT,
2243 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2244 },
2245 {
2246 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2247 .insns = {
2248 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2249 offsetof(struct __sk_buff, data)),
2250 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2251 offsetof(struct __sk_buff, data_end)),
2252 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2254 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2255 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2256 BPF_MOV64_IMM(BPF_REG_0, 1),
2257 BPF_EXIT_INSN(),
2258 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2259 BPF_MOV64_IMM(BPF_REG_0, 0),
2260 BPF_EXIT_INSN(),
2261 },
2262 .errstr = "invalid access to packet",
2263 .result = REJECT,
2264 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2265 },
2266 {
2267 "direct packet access: test8 (double test, variant 1)",
2268 .insns = {
2269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2270 offsetof(struct __sk_buff, data)),
2271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2272 offsetof(struct __sk_buff, data_end)),
2273 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2275 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2276 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2277 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2278 BPF_MOV64_IMM(BPF_REG_0, 1),
2279 BPF_EXIT_INSN(),
2280 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2281 BPF_MOV64_IMM(BPF_REG_0, 0),
2282 BPF_EXIT_INSN(),
2283 },
2284 .result = ACCEPT,
2285 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2286 },
2287 {
2288 "direct packet access: test9 (double test, variant 2)",
2289 .insns = {
2290 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2291 offsetof(struct __sk_buff, data)),
2292 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2293 offsetof(struct __sk_buff, data_end)),
2294 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2296 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2297 BPF_MOV64_IMM(BPF_REG_0, 1),
2298 BPF_EXIT_INSN(),
2299 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2300 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2302 BPF_MOV64_IMM(BPF_REG_0, 0),
2303 BPF_EXIT_INSN(),
2304 },
2305 .result = ACCEPT,
2306 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2307 },
2308 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002309 "direct packet access: test10 (write invalid)",
2310 .insns = {
2311 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2312 offsetof(struct __sk_buff, data)),
2313 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2314 offsetof(struct __sk_buff, data_end)),
2315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2317 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2318 BPF_MOV64_IMM(BPF_REG_0, 0),
2319 BPF_EXIT_INSN(),
2320 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2321 BPF_MOV64_IMM(BPF_REG_0, 0),
2322 BPF_EXIT_INSN(),
2323 },
2324 .errstr = "invalid access to packet",
2325 .result = REJECT,
2326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2327 },
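	/* Tests 11-13 derive a small, provably bounded offset (via right
	 * shift, AND masking or branches over known constants) and add it to
	 * the packet pointer; the verifier must accept this arithmetic.
	 */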
2328 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002329 "direct packet access: test11 (shift, good access)",
2330 .insns = {
2331 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2332 offsetof(struct __sk_buff, data)),
2333 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2334 offsetof(struct __sk_buff, data_end)),
2335 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2337 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2338 BPF_MOV64_IMM(BPF_REG_3, 144),
2339 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2341 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2342 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2343 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2344 BPF_MOV64_IMM(BPF_REG_0, 1),
2345 BPF_EXIT_INSN(),
2346 BPF_MOV64_IMM(BPF_REG_0, 0),
2347 BPF_EXIT_INSN(),
2348 },
2349 .result = ACCEPT,
2350 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2351 },
2352 {
2353 "direct packet access: test12 (and, good access)",
2354 .insns = {
2355 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2356 offsetof(struct __sk_buff, data)),
2357 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2358 offsetof(struct __sk_buff, data_end)),
2359 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2361 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2362 BPF_MOV64_IMM(BPF_REG_3, 144),
2363 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2365 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2366 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2367 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2368 BPF_MOV64_IMM(BPF_REG_0, 1),
2369 BPF_EXIT_INSN(),
2370 BPF_MOV64_IMM(BPF_REG_0, 0),
2371 BPF_EXIT_INSN(),
2372 },
2373 .result = ACCEPT,
2374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2375 },
2376 {
2377 "direct packet access: test13 (branches, good access)",
2378 .insns = {
2379 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2380 offsetof(struct __sk_buff, data)),
2381 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2382 offsetof(struct __sk_buff, data_end)),
2383 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2385 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2386 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2387 offsetof(struct __sk_buff, mark)),
2388 BPF_MOV64_IMM(BPF_REG_4, 1),
2389 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2390 BPF_MOV64_IMM(BPF_REG_3, 14),
2391 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2392 BPF_MOV64_IMM(BPF_REG_3, 24),
2393 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2395 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2396 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2397 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2398 BPF_MOV64_IMM(BPF_REG_0, 1),
2399 BPF_EXIT_INSN(),
2400 BPF_MOV64_IMM(BPF_REG_0, 0),
2401 BPF_EXIT_INSN(),
2402 },
2403 .result = ACCEPT,
2404 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2405 },
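	/* Helper access to packet: packet pointers may be passed to map
	 * helpers as key/value arguments only after a data_end check has
	 * shown that the accessed range lies inside the packet; unchecked
	 * or too short pointers have to be rejected.
	 */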
2406 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002407 "helper access to packet: test1, valid packet_ptr range",
2408 .insns = {
2409 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2410 offsetof(struct xdp_md, data)),
2411 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2412 offsetof(struct xdp_md, data_end)),
2413 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2415 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2416 BPF_LD_MAP_FD(BPF_REG_1, 0),
2417 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2418 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2420 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002421 BPF_MOV64_IMM(BPF_REG_0, 0),
2422 BPF_EXIT_INSN(),
2423 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002424 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002425 .result_unpriv = ACCEPT,
2426 .result = ACCEPT,
2427 .prog_type = BPF_PROG_TYPE_XDP,
2428 },
2429 {
2430 "helper access to packet: test2, unchecked packet_ptr",
2431 .insns = {
2432 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2433 offsetof(struct xdp_md, data)),
2434 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2436 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002437 BPF_MOV64_IMM(BPF_REG_0, 0),
2438 BPF_EXIT_INSN(),
2439 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002440 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002441 .result = REJECT,
2442 .errstr = "invalid access to packet",
2443 .prog_type = BPF_PROG_TYPE_XDP,
2444 },
2445 {
2446 "helper access to packet: test3, variable add",
2447 .insns = {
2448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2449 offsetof(struct xdp_md, data)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2451 offsetof(struct xdp_md, data_end)),
2452 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2454 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2455 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2457 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2458 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2460 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2461 BPF_LD_MAP_FD(BPF_REG_1, 0),
2462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2464 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002465 BPF_MOV64_IMM(BPF_REG_0, 0),
2466 BPF_EXIT_INSN(),
2467 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002468 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002469 .result = ACCEPT,
2470 .prog_type = BPF_PROG_TYPE_XDP,
2471 },
2472 {
2473 "helper access to packet: test4, packet_ptr with bad range",
2474 .insns = {
2475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2476 offsetof(struct xdp_md, data)),
2477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2478 offsetof(struct xdp_md, data_end)),
2479 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2481 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2482 BPF_MOV64_IMM(BPF_REG_0, 0),
2483 BPF_EXIT_INSN(),
2484 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002485 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2486 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002487 BPF_MOV64_IMM(BPF_REG_0, 0),
2488 BPF_EXIT_INSN(),
2489 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002490 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002491 .result = REJECT,
2492 .errstr = "invalid access to packet",
2493 .prog_type = BPF_PROG_TYPE_XDP,
2494 },
2495 {
2496 "helper access to packet: test5, packet_ptr with too short range",
2497 .insns = {
2498 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2499 offsetof(struct xdp_md, data)),
2500 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2501 offsetof(struct xdp_md, data_end)),
2502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2503 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2505 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2506 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2508 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002509 BPF_MOV64_IMM(BPF_REG_0, 0),
2510 BPF_EXIT_INSN(),
2511 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002512 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002513 .result = REJECT,
2514 .errstr = "invalid access to packet",
2515 .prog_type = BPF_PROG_TYPE_XDP,
2516 },
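	/* Tests 6-10 repeat the packet range checks above for cls (skb)
	 * programs instead of XDP.
	 */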
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002517 {
2518 "helper access to packet: test6, cls valid packet_ptr range",
2519 .insns = {
2520 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2521 offsetof(struct __sk_buff, data)),
2522 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2523 offsetof(struct __sk_buff, data_end)),
2524 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2526 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2527 BPF_LD_MAP_FD(BPF_REG_1, 0),
2528 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2529 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2531 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002532 BPF_MOV64_IMM(BPF_REG_0, 0),
2533 BPF_EXIT_INSN(),
2534 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002535 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002536 .result = ACCEPT,
2537 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2538 },
2539 {
2540 "helper access to packet: test7, cls unchecked packet_ptr",
2541 .insns = {
2542 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2543 offsetof(struct __sk_buff, data)),
2544 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002545 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2546 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002547 BPF_MOV64_IMM(BPF_REG_0, 0),
2548 BPF_EXIT_INSN(),
2549 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002550 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002551 .result = REJECT,
2552 .errstr = "invalid access to packet",
2553 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2554 },
2555 {
2556 "helper access to packet: test8, cls variable add",
2557 .insns = {
2558 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2559 offsetof(struct __sk_buff, data)),
2560 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2561 offsetof(struct __sk_buff, data_end)),
2562 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2564 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2565 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2566 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2567 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2568 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2570 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2571 BPF_LD_MAP_FD(BPF_REG_1, 0),
2572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2574 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002575 BPF_MOV64_IMM(BPF_REG_0, 0),
2576 BPF_EXIT_INSN(),
2577 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002578 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002579 .result = ACCEPT,
2580 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2581 },
2582 {
2583 "helper access to packet: test9, cls packet_ptr with bad range",
2584 .insns = {
2585 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2586 offsetof(struct __sk_buff, data)),
2587 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2588 offsetof(struct __sk_buff, data_end)),
2589 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2591 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2592 BPF_MOV64_IMM(BPF_REG_0, 0),
2593 BPF_EXIT_INSN(),
2594 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2596 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002597 BPF_MOV64_IMM(BPF_REG_0, 0),
2598 BPF_EXIT_INSN(),
2599 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002600 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002601 .result = REJECT,
2602 .errstr = "invalid access to packet",
2603 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2604 },
2605 {
2606 "helper access to packet: test10, cls packet_ptr with too short range",
2607 .insns = {
2608 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2609 offsetof(struct __sk_buff, data)),
2610 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2611 offsetof(struct __sk_buff, data_end)),
2612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2613 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2615 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2616 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2618 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002619 BPF_MOV64_IMM(BPF_REG_0, 0),
2620 BPF_EXIT_INSN(),
2621 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002622 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002623 .result = REJECT,
2624 .errstr = "invalid access to packet",
2625 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2626 },
2627 {
2628 "helper access to packet: test11, cls unsuitable helper 1",
2629 .insns = {
2630 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2631 offsetof(struct __sk_buff, data)),
2632 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2633 offsetof(struct __sk_buff, data_end)),
2634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2635 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2637 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2638 BPF_MOV64_IMM(BPF_REG_2, 0),
2639 BPF_MOV64_IMM(BPF_REG_4, 42),
2640 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002641 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2642 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002643 BPF_MOV64_IMM(BPF_REG_0, 0),
2644 BPF_EXIT_INSN(),
2645 },
2646 .result = REJECT,
2647 .errstr = "helper access to the packet",
2648 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2649 },
2650 {
2651 "helper access to packet: test12, cls unsuitable helper 2",
2652 .insns = {
2653 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2654 offsetof(struct __sk_buff, data)),
2655 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2656 offsetof(struct __sk_buff, data_end)),
2657 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2659 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2660 BPF_MOV64_IMM(BPF_REG_2, 0),
2661 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2663 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002664 BPF_MOV64_IMM(BPF_REG_0, 0),
2665 BPF_EXIT_INSN(),
2666 },
2667 .result = REJECT,
2668 .errstr = "helper access to the packet",
2669 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2670 },
2671 {
2672 "helper access to packet: test13, cls helper ok",
2673 .insns = {
2674 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2675 offsetof(struct __sk_buff, data)),
2676 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2677 offsetof(struct __sk_buff, data_end)),
2678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2681 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2682 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2683 BPF_MOV64_IMM(BPF_REG_2, 4),
2684 BPF_MOV64_IMM(BPF_REG_3, 0),
2685 BPF_MOV64_IMM(BPF_REG_4, 0),
2686 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2688 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002689 BPF_MOV64_IMM(BPF_REG_0, 0),
2690 BPF_EXIT_INSN(),
2691 },
2692 .result = ACCEPT,
2693 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2694 },
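	/* Tests 14-18 hand a bad buffer to bpf_csum_diff(): subtracting from
	 * the checked packet pointer leaves an invalid (inv) register, and
	 * lengths that exceed the verified range, are negative or are zero
	 * must all be rejected.
	 */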
2695 {
2696 "helper access to packet: test14, cls helper fail sub",
2697 .insns = {
2698 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2699 offsetof(struct __sk_buff, data)),
2700 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2701 offsetof(struct __sk_buff, data_end)),
2702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2705 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2706 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2707 BPF_MOV64_IMM(BPF_REG_2, 4),
2708 BPF_MOV64_IMM(BPF_REG_3, 0),
2709 BPF_MOV64_IMM(BPF_REG_4, 0),
2710 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2712 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002713 BPF_MOV64_IMM(BPF_REG_0, 0),
2714 BPF_EXIT_INSN(),
2715 },
2716 .result = REJECT,
2717 .errstr = "type=inv expected=fp",
2718 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2719 },
2720 {
2721 "helper access to packet: test15, cls helper fail range 1",
2722 .insns = {
2723 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2724 offsetof(struct __sk_buff, data)),
2725 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2726 offsetof(struct __sk_buff, data_end)),
2727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2728 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2730 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2731 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2732 BPF_MOV64_IMM(BPF_REG_2, 8),
2733 BPF_MOV64_IMM(BPF_REG_3, 0),
2734 BPF_MOV64_IMM(BPF_REG_4, 0),
2735 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2737 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002738 BPF_MOV64_IMM(BPF_REG_0, 0),
2739 BPF_EXIT_INSN(),
2740 },
2741 .result = REJECT,
2742 .errstr = "invalid access to packet",
2743 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2744 },
2745 {
2746 "helper access to packet: test16, cls helper fail range 2",
2747 .insns = {
2748 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2749 offsetof(struct __sk_buff, data)),
2750 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2751 offsetof(struct __sk_buff, data_end)),
2752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2753 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2755 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2757 BPF_MOV64_IMM(BPF_REG_2, -9),
2758 BPF_MOV64_IMM(BPF_REG_3, 0),
2759 BPF_MOV64_IMM(BPF_REG_4, 0),
2760 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2762 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002763 BPF_MOV64_IMM(BPF_REG_0, 0),
2764 BPF_EXIT_INSN(),
2765 },
2766 .result = REJECT,
2767 .errstr = "invalid access to packet",
2768 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2769 },
2770 {
2771 "helper access to packet: test17, cls helper fail range 3",
2772 .insns = {
2773 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2774 offsetof(struct __sk_buff, data)),
2775 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2776 offsetof(struct __sk_buff, data_end)),
2777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2778 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2780 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2781 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2782 BPF_MOV64_IMM(BPF_REG_2, ~0),
2783 BPF_MOV64_IMM(BPF_REG_3, 0),
2784 BPF_MOV64_IMM(BPF_REG_4, 0),
2785 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002786 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2787 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002788 BPF_MOV64_IMM(BPF_REG_0, 0),
2789 BPF_EXIT_INSN(),
2790 },
2791 .result = REJECT,
2792 .errstr = "invalid access to packet",
2793 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2794 },
2795 {
2796 "helper access to packet: test18, cls helper fail range zero",
2797 .insns = {
2798 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2799 offsetof(struct __sk_buff, data)),
2800 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2801 offsetof(struct __sk_buff, data_end)),
2802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2805 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2807 BPF_MOV64_IMM(BPF_REG_2, 0),
2808 BPF_MOV64_IMM(BPF_REG_3, 0),
2809 BPF_MOV64_IMM(BPF_REG_4, 0),
2810 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2812 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002813 BPF_MOV64_IMM(BPF_REG_0, 0),
2814 BPF_EXIT_INSN(),
2815 },
2816 .result = REJECT,
2817 .errstr = "invalid access to packet",
2818 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2819 },
2820 {
2821 "helper access to packet: test19, pkt end as input",
2822 .insns = {
2823 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2824 offsetof(struct __sk_buff, data)),
2825 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2826 offsetof(struct __sk_buff, data_end)),
2827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2828 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2830 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
2832 BPF_MOV64_IMM(BPF_REG_2, 4),
2833 BPF_MOV64_IMM(BPF_REG_3, 0),
2834 BPF_MOV64_IMM(BPF_REG_4, 0),
2835 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2837 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002838 BPF_MOV64_IMM(BPF_REG_0, 0),
2839 BPF_EXIT_INSN(),
2840 },
2841 .result = REJECT,
2842 .errstr = "R1 type=pkt_end expected=fp",
2843 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2844 },
2845 {
2846 "helper access to packet: test20, wrong reg",
2847 .insns = {
2848 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2849 offsetof(struct __sk_buff, data)),
2850 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2851 offsetof(struct __sk_buff, data_end)),
2852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2853 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2855 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2856 BPF_MOV64_IMM(BPF_REG_2, 4),
2857 BPF_MOV64_IMM(BPF_REG_3, 0),
2858 BPF_MOV64_IMM(BPF_REG_4, 0),
2859 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2861 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002862 BPF_MOV64_IMM(BPF_REG_0, 0),
2863 BPF_EXIT_INSN(),
2864 },
2865 .result = REJECT,
2866 .errstr = "invalid access to packet",
2867 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2868 },
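	/* Array map value accesses: an index used to offset into the value
	 * must be provably inside the value area, either as a constant or
	 * through explicit lower and upper bounds checks on the register,
	 * and unprivileged programs may not do pointer arithmetic on it at
	 * all.
	 */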
Josef Bacik48461132016-09-28 10:54:32 -04002869 {
2870 "valid map access into an array with a constant",
2871 .insns = {
2872 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2875 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2877 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002879 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2880 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002881 BPF_EXIT_INSN(),
2882 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002883 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04002884 .errstr_unpriv = "R0 leaks addr",
2885 .result_unpriv = REJECT,
2886 .result = ACCEPT,
2887 },
2888 {
2889 "valid map access into an array with a register",
2890 .insns = {
2891 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2892 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2894 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002895 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2896 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002897 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2898 BPF_MOV64_IMM(BPF_REG_1, 4),
2899 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2900 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002901 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2902 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002903 BPF_EXIT_INSN(),
2904 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002905 .fixup_map2 = { 3 },
2906 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002907 .result_unpriv = REJECT,
2908 .result = ACCEPT,
2909 },
2910 {
2911 "valid map access into an array with a variable",
2912 .insns = {
2913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2914 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2916 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2918 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
2920 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2921 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
2922 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2923 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002924 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2925 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002926 BPF_EXIT_INSN(),
2927 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002928 .fixup_map2 = { 3 },
2929 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002930 .result_unpriv = REJECT,
2931 .result = ACCEPT,
2932 },
2933 {
2934 "valid map access into an array with a signed variable",
2935 .insns = {
2936 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2937 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2939 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002940 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2941 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002942 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
2943 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2944 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
2945 BPF_MOV32_IMM(BPF_REG_1, 0),
2946 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
2947 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
2948 BPF_MOV32_IMM(BPF_REG_1, 0),
2949 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
2950 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002951 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2952 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002953 BPF_EXIT_INSN(),
2954 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002955 .fixup_map2 = { 3 },
2956 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002957 .result_unpriv = REJECT,
2958 .result = ACCEPT,
2959 },
2960 {
2961 "invalid map access into an array with a constant",
2962 .insns = {
2963 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2966 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2968 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2970 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
2971 offsetof(struct test_val, foo)),
2972 BPF_EXIT_INSN(),
2973 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002974 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04002975 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
2976 .result = REJECT,
2977 },
2978 {
2979 "invalid map access into an array with a register",
2980 .insns = {
2981 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2982 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2984 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002985 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2986 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002987 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2988 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
2989 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2990 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002991 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2992 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002993 BPF_EXIT_INSN(),
2994 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002995 .fixup_map2 = { 3 },
2996 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002997 .errstr = "R0 min value is outside of the array range",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002998 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04002999 .result = REJECT,
3000 },
3001 {
3002 "invalid map access into an array with a variable",
3003 .insns = {
3004 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3005 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3007 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003008 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3009 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003010 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3011 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3012 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3013 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003014 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3015 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003016 BPF_EXIT_INSN(),
3017 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003018 .fixup_map2 = { 3 },
3019 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003020 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003021 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003022 .result = REJECT,
3023 },
3024 {
3025 "invalid map access into an array with no floor check",
3026 .insns = {
3027 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3030 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3032 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3034 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3035 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3036 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3037 BPF_MOV32_IMM(BPF_REG_1, 0),
3038 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3039 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003040 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3041 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003042 BPF_EXIT_INSN(),
3043 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003044 .fixup_map2 = { 3 },
3045 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003046 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003047 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003048 .result = REJECT,
3049 },
3050 {
3051 "invalid map access into an array with a invalid max check",
3052 .insns = {
3053 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3056 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3058 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3060 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3061 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3062 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3063 BPF_MOV32_IMM(BPF_REG_1, 0),
3064 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3065 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3066	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3067		   offsetof(struct test_val, foo)),
3068	BPF_EXIT_INSN(),
3069	},
3070	.fixup_map2 = { 3 },
3071	.errstr_unpriv = "R0 pointer arithmetic prohibited",
3072	.errstr = "invalid access to map value, value_size=48 off=44 size=8",
3073	.result_unpriv = REJECT,
3074	.result = REJECT,
3075 },
3076 {
3077	"invalid map access into an array with an invalid max check",
3078 .insns = {
3079 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3082 BPF_LD_MAP_FD(BPF_REG_1, 0),
3083	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3084		     BPF_FUNC_map_lookup_elem),
3085	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3086 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3087 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3088 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3090 BPF_LD_MAP_FD(BPF_REG_1, 0),
3091	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3092		     BPF_FUNC_map_lookup_elem),
3093	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3094	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3095	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3096		    offsetof(struct test_val, foo)),
3097	BPF_EXIT_INSN(),
3098	},
3099	.fixup_map2 = { 3, 11 },
3100	.errstr_unpriv = "R0 pointer arithmetic prohibited",
3101	.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3102	.result_unpriv = REJECT,
3103	.result = REJECT,
3104 },
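	/* The next group of tests covers several registers holding copies of
	 * the same map_lookup_elem() result: a NULL check on one copy must
	 * make the other copies usable, while copies that are stale or never
	 * checked must still be rejected (e.g. "R4 !read_ok").
	 */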
3105	{
3106 "multiple registers share map_lookup_elem result",
3107 .insns = {
3108 BPF_MOV64_IMM(BPF_REG_1, 10),
3109 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3112 BPF_LD_MAP_FD(BPF_REG_1, 0),
3113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3114 BPF_FUNC_map_lookup_elem),
3115 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3117 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3118 BPF_EXIT_INSN(),
3119 },
3120 .fixup_map1 = { 4 },
3121 .result = ACCEPT,
3122 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3123 },
3124 {
3125 "invalid memory access with multiple map_lookup_elem calls",
3126 .insns = {
3127 BPF_MOV64_IMM(BPF_REG_1, 10),
3128 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3131 BPF_LD_MAP_FD(BPF_REG_1, 0),
3132 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3133 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3134 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3135 BPF_FUNC_map_lookup_elem),
3136 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3137 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3138 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3140 BPF_FUNC_map_lookup_elem),
3141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3142 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3143 BPF_EXIT_INSN(),
3144 },
3145 .fixup_map1 = { 4 },
3146 .result = REJECT,
3147 .errstr = "R4 !read_ok",
3148 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3149 },
3150 {
3151 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3152 .insns = {
3153 BPF_MOV64_IMM(BPF_REG_1, 10),
3154 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3155 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3157 BPF_LD_MAP_FD(BPF_REG_1, 0),
3158 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3159 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3160 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3161 BPF_FUNC_map_lookup_elem),
3162 BPF_MOV64_IMM(BPF_REG_2, 10),
3163 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3166 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3167 BPF_FUNC_map_lookup_elem),
3168 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3170 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3171 BPF_EXIT_INSN(),
3172 },
3173 .fixup_map1 = { 4 },
3174 .result = ACCEPT,
3175 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3176 },
3177	{
3178	"multiple registers share map_lookup_elem bad reg type",
3179 .insns = {
3180 BPF_MOV64_IMM(BPF_REG_1, 10),
3181 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3182 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3184 BPF_LD_MAP_FD(BPF_REG_1, 0),
3185 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3186 BPF_FUNC_map_lookup_elem),
3187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3188 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3189 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3190 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3191 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3192 BPF_MOV64_IMM(BPF_REG_1, 1),
3193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3194 BPF_MOV64_IMM(BPF_REG_1, 2),
3195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3196 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3197 BPF_MOV64_IMM(BPF_REG_1, 3),
3198 BPF_EXIT_INSN(),
3199 },
3200 .fixup_map1 = { 4 },
3201 .result = REJECT,
3202 .errstr = "R3 invalid mem access 'inv'",
3203 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3204 },
3205 {
3206	"invalid map access from else condition",
3207 .insns = {
3208 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3211 BPF_LD_MAP_FD(BPF_REG_1, 0),
3212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3214 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3215 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3217 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3218 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3220 BPF_EXIT_INSN(),
3221 },
3222 .fixup_map2 = { 3 },
3223 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3224 .result = REJECT,
3225 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3226 .result_unpriv = REJECT,
3227 },
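	/* The four tests below check that OR-ing an immediate or a known
	 * constant register into another constant register keeps it a known
	 * constant for the stack-size argument of bpf_probe_read(), and that
	 * stack bounds are still enforced on the resulting value.
	 */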
3228	{
3229 "constant register |= constant should keep constant type",
3230 .insns = {
3231 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3233 BPF_MOV64_IMM(BPF_REG_2, 34),
3234 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3235 BPF_MOV64_IMM(BPF_REG_3, 0),
3236 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3237 BPF_EXIT_INSN(),
3238 },
3239 .result = ACCEPT,
3240 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3241 },
3242 {
3243 "constant register |= constant should not bypass stack boundary checks",
3244 .insns = {
3245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3247 BPF_MOV64_IMM(BPF_REG_2, 34),
3248 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3249 BPF_MOV64_IMM(BPF_REG_3, 0),
3250 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3251 BPF_EXIT_INSN(),
3252 },
3253 .errstr = "invalid stack type R1 off=-48 access_size=58",
3254 .result = REJECT,
3255 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3256 },
3257 {
3258 "constant register |= constant register should keep constant type",
3259 .insns = {
3260 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3262 BPF_MOV64_IMM(BPF_REG_2, 34),
3263 BPF_MOV64_IMM(BPF_REG_4, 13),
3264 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3265 BPF_MOV64_IMM(BPF_REG_3, 0),
3266 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3267 BPF_EXIT_INSN(),
3268 },
3269 .result = ACCEPT,
3270 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3271 },
3272 {
3273 "constant register |= constant register should not bypass stack boundary checks",
3274 .insns = {
3275 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3277 BPF_MOV64_IMM(BPF_REG_2, 34),
3278 BPF_MOV64_IMM(BPF_REG_4, 24),
3279 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3280 BPF_MOV64_IMM(BPF_REG_3, 0),
3281 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3282 BPF_EXIT_INSN(),
3283 },
3284 .errstr = "invalid stack type R1 off=-48 access_size=58",
3285 .result = REJECT,
3286 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3287 },
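	/* Direct packet access tests for the LWT program types: writes are
	 * expected to be rejected for LWT_IN and LWT_OUT ("cannot write into
	 * packet") while reads pass for all three, only LWT_XMIT may write,
	 * and skb->tc_classid is not accessible from any LWT program.
	 */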
3288	{
3289 "invalid direct packet write for LWT_IN",
3290 .insns = {
3291 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3292 offsetof(struct __sk_buff, data)),
3293 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3294 offsetof(struct __sk_buff, data_end)),
3295 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3297 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3298 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3299 BPF_MOV64_IMM(BPF_REG_0, 0),
3300 BPF_EXIT_INSN(),
3301 },
3302 .errstr = "cannot write into packet",
3303 .result = REJECT,
3304 .prog_type = BPF_PROG_TYPE_LWT_IN,
3305 },
3306 {
3307 "invalid direct packet write for LWT_OUT",
3308 .insns = {
3309 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3310 offsetof(struct __sk_buff, data)),
3311 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3312 offsetof(struct __sk_buff, data_end)),
3313 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3315 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3316 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3317 BPF_MOV64_IMM(BPF_REG_0, 0),
3318 BPF_EXIT_INSN(),
3319 },
3320 .errstr = "cannot write into packet",
3321 .result = REJECT,
3322 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3323 },
3324 {
3325 "direct packet write for LWT_XMIT",
3326 .insns = {
3327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3328 offsetof(struct __sk_buff, data)),
3329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3330 offsetof(struct __sk_buff, data_end)),
3331 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3334 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3335 BPF_MOV64_IMM(BPF_REG_0, 0),
3336 BPF_EXIT_INSN(),
3337 },
3338 .result = ACCEPT,
3339 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3340 },
3341 {
3342 "direct packet read for LWT_IN",
3343 .insns = {
3344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3345 offsetof(struct __sk_buff, data)),
3346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3347 offsetof(struct __sk_buff, data_end)),
3348 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3350 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3351 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3352 BPF_MOV64_IMM(BPF_REG_0, 0),
3353 BPF_EXIT_INSN(),
3354 },
3355 .result = ACCEPT,
3356 .prog_type = BPF_PROG_TYPE_LWT_IN,
3357 },
3358 {
3359 "direct packet read for LWT_OUT",
3360 .insns = {
3361 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3362 offsetof(struct __sk_buff, data)),
3363 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3364 offsetof(struct __sk_buff, data_end)),
3365 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3367 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3368 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3369 BPF_MOV64_IMM(BPF_REG_0, 0),
3370 BPF_EXIT_INSN(),
3371 },
3372 .result = ACCEPT,
3373 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3374 },
3375 {
3376 "direct packet read for LWT_XMIT",
3377 .insns = {
3378 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3379 offsetof(struct __sk_buff, data)),
3380 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3381 offsetof(struct __sk_buff, data_end)),
3382 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3384 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3385 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3386 BPF_MOV64_IMM(BPF_REG_0, 0),
3387 BPF_EXIT_INSN(),
3388 },
3389 .result = ACCEPT,
3390 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3391 },
3392 {
3393 "invalid access of tc_classid for LWT_IN",
3394 .insns = {
3395 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3396 offsetof(struct __sk_buff, tc_classid)),
3397 BPF_EXIT_INSN(),
3398 },
3399 .result = REJECT,
3400 .errstr = "invalid bpf_context access",
3401 },
3402 {
3403 "invalid access of tc_classid for LWT_OUT",
3404 .insns = {
3405 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3406 offsetof(struct __sk_buff, tc_classid)),
3407 BPF_EXIT_INSN(),
3408 },
3409 .result = REJECT,
3410 .errstr = "invalid bpf_context access",
3411 },
3412 {
3413 "invalid access of tc_classid for LWT_XMIT",
3414 .insns = {
3415 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3416 offsetof(struct __sk_buff, tc_classid)),
3417 BPF_EXIT_INSN(),
3418 },
3419 .result = REJECT,
3420 .errstr = "invalid bpf_context access",
3421 },
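	/* "helper access to map" tests: a map value pointer plus a size is
	 * passed to bpf_probe_read(); full and partial ranges are expected
	 * to be accepted, while empty, negative and out-of-bound sizes must
	 * be rejected against the value_size of struct test_val (48 bytes).
	 */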
3422	{
3423 "helper access to map: full range",
3424 .insns = {
3425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3427 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3428 BPF_LD_MAP_FD(BPF_REG_1, 0),
3429 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3430 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3431 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3432 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3433 BPF_MOV64_IMM(BPF_REG_3, 0),
3434 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3435 BPF_EXIT_INSN(),
3436 },
3437 .fixup_map2 = { 3 },
3438 .result = ACCEPT,
3439 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3440 },
3441 {
3442 "helper access to map: partial range",
3443 .insns = {
3444 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3446 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3447 BPF_LD_MAP_FD(BPF_REG_1, 0),
3448 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3449 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3450 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3451 BPF_MOV64_IMM(BPF_REG_2, 8),
3452 BPF_MOV64_IMM(BPF_REG_3, 0),
3453 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3454 BPF_EXIT_INSN(),
3455 },
3456 .fixup_map2 = { 3 },
3457 .result = ACCEPT,
3458 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3459 },
3460 {
3461 "helper access to map: empty range",
3462 .insns = {
3463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3465 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3466 BPF_LD_MAP_FD(BPF_REG_1, 0),
3467 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3468 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3469 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3470 BPF_MOV64_IMM(BPF_REG_2, 0),
3471 BPF_MOV64_IMM(BPF_REG_3, 0),
3472 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3473 BPF_EXIT_INSN(),
3474 },
3475 .fixup_map2 = { 3 },
3476 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3477 .result = REJECT,
3478 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3479 },
3480 {
3481 "helper access to map: out-of-bound range",
3482 .insns = {
3483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3485 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3486 BPF_LD_MAP_FD(BPF_REG_1, 0),
3487 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3488 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3490 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3491 BPF_MOV64_IMM(BPF_REG_3, 0),
3492 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3493 BPF_EXIT_INSN(),
3494 },
3495 .fixup_map2 = { 3 },
3496 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3497 .result = REJECT,
3498 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3499 },
3500 {
3501 "helper access to map: negative range",
3502 .insns = {
3503 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3505 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3506 BPF_LD_MAP_FD(BPF_REG_1, 0),
3507 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3508 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3509 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3510 BPF_MOV64_IMM(BPF_REG_2, -8),
3511 BPF_MOV64_IMM(BPF_REG_3, 0),
3512 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3513 BPF_EXIT_INSN(),
3514 },
3515 .fixup_map2 = { 3 },
3516 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3517 .result = REJECT,
3518 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3519 },
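	/* Same range checks as above, but with the map value pointer first
	 * advanced by a constant (offsetof(struct test_val, foo)), either as
	 * an immediate or via a register, so the space the helper may touch
	 * shrinks accordingly.
	 */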
3520 {
3521 "helper access to adjusted map (via const imm): full range",
3522 .insns = {
3523 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3525 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3526 BPF_LD_MAP_FD(BPF_REG_1, 0),
3527 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3528 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3531 offsetof(struct test_val, foo)),
3532 BPF_MOV64_IMM(BPF_REG_2,
3533 sizeof(struct test_val) -
3534 offsetof(struct test_val, foo)),
3535 BPF_MOV64_IMM(BPF_REG_3, 0),
3536 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3537 BPF_EXIT_INSN(),
3538 },
3539 .fixup_map2 = { 3 },
3540 .result = ACCEPT,
3541 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3542 },
3543 {
3544 "helper access to adjusted map (via const imm): partial range",
3545 .insns = {
3546 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3548 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3549 BPF_LD_MAP_FD(BPF_REG_1, 0),
3550 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3551 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3554 offsetof(struct test_val, foo)),
3555 BPF_MOV64_IMM(BPF_REG_2, 8),
3556 BPF_MOV64_IMM(BPF_REG_3, 0),
3557 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3558 BPF_EXIT_INSN(),
3559 },
3560 .fixup_map2 = { 3 },
3561 .result = ACCEPT,
3562 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3563 },
3564 {
3565 "helper access to adjusted map (via const imm): empty range",
3566 .insns = {
3567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3569 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3570 BPF_LD_MAP_FD(BPF_REG_1, 0),
3571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3575 offsetof(struct test_val, foo)),
3576 BPF_MOV64_IMM(BPF_REG_2, 0),
3577 BPF_MOV64_IMM(BPF_REG_3, 0),
3578 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3579 BPF_EXIT_INSN(),
3580 },
3581 .fixup_map2 = { 3 },
3582 .errstr = "R1 min value is outside of the array range",
3583 .result = REJECT,
3584 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3585 },
3586 {
3587 "helper access to adjusted map (via const imm): out-of-bound range",
3588 .insns = {
3589 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3591 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3592 BPF_LD_MAP_FD(BPF_REG_1, 0),
3593 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3595 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3597 offsetof(struct test_val, foo)),
3598 BPF_MOV64_IMM(BPF_REG_2,
3599 sizeof(struct test_val) -
3600 offsetof(struct test_val, foo) + 8),
3601 BPF_MOV64_IMM(BPF_REG_3, 0),
3602 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3603 BPF_EXIT_INSN(),
3604 },
3605 .fixup_map2 = { 3 },
3606 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3607 .result = REJECT,
3608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3609 },
3610 {
3611 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3612 .insns = {
3613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3615 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3616 BPF_LD_MAP_FD(BPF_REG_1, 0),
3617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3621 offsetof(struct test_val, foo)),
3622 BPF_MOV64_IMM(BPF_REG_2, -8),
3623 BPF_MOV64_IMM(BPF_REG_3, 0),
3624 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3625 BPF_EXIT_INSN(),
3626 },
3627 .fixup_map2 = { 3 },
3628 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3629 .result = REJECT,
3630 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3631 },
3632 {
3633 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3634 .insns = {
3635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3637 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3638 BPF_LD_MAP_FD(BPF_REG_1, 0),
3639 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3640 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3643 offsetof(struct test_val, foo)),
3644 BPF_MOV64_IMM(BPF_REG_2, -1),
3645 BPF_MOV64_IMM(BPF_REG_3, 0),
3646 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3647 BPF_EXIT_INSN(),
3648 },
3649 .fixup_map2 = { 3 },
3650 .errstr = "R1 min value is outside of the array range",
3651 .result = REJECT,
3652 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3653 },
3654 {
3655 "helper access to adjusted map (via const reg): full range",
3656 .insns = {
3657 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3659 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3660 BPF_LD_MAP_FD(BPF_REG_1, 0),
3661 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3664 BPF_MOV64_IMM(BPF_REG_3,
3665 offsetof(struct test_val, foo)),
3666 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3667 BPF_MOV64_IMM(BPF_REG_2,
3668 sizeof(struct test_val) -
3669 offsetof(struct test_val, foo)),
3670 BPF_MOV64_IMM(BPF_REG_3, 0),
3671 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3672 BPF_EXIT_INSN(),
3673 },
3674 .fixup_map2 = { 3 },
3675 .result = ACCEPT,
3676 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3677 },
3678 {
3679 "helper access to adjusted map (via const reg): partial range",
3680 .insns = {
3681 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3683 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3684 BPF_LD_MAP_FD(BPF_REG_1, 0),
3685 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3688 BPF_MOV64_IMM(BPF_REG_3,
3689 offsetof(struct test_val, foo)),
3690 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3691 BPF_MOV64_IMM(BPF_REG_2, 8),
3692 BPF_MOV64_IMM(BPF_REG_3, 0),
3693 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3694 BPF_EXIT_INSN(),
3695 },
3696 .fixup_map2 = { 3 },
3697 .result = ACCEPT,
3698 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3699 },
3700 {
3701 "helper access to adjusted map (via const reg): empty range",
3702 .insns = {
3703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3705 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3706 BPF_LD_MAP_FD(BPF_REG_1, 0),
3707 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3708 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3710 BPF_MOV64_IMM(BPF_REG_3, 0),
3711 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3712 BPF_MOV64_IMM(BPF_REG_2, 0),
3713 BPF_MOV64_IMM(BPF_REG_3, 0),
3714 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3715 BPF_EXIT_INSN(),
3716 },
3717 .fixup_map2 = { 3 },
3718 .errstr = "R1 min value is outside of the array range",
3719 .result = REJECT,
3720 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3721 },
3722 {
3723 "helper access to adjusted map (via const reg): out-of-bound range",
3724 .insns = {
3725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3727 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3728 BPF_LD_MAP_FD(BPF_REG_1, 0),
3729 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3731 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3732 BPF_MOV64_IMM(BPF_REG_3,
3733 offsetof(struct test_val, foo)),
3734 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3735 BPF_MOV64_IMM(BPF_REG_2,
3736 sizeof(struct test_val) -
3737 offsetof(struct test_val, foo) + 8),
3738 BPF_MOV64_IMM(BPF_REG_3, 0),
3739 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3740 BPF_EXIT_INSN(),
3741 },
3742 .fixup_map2 = { 3 },
3743 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3744 .result = REJECT,
3745 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3746 },
3747 {
3748 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3749 .insns = {
3750 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3752 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3753 BPF_LD_MAP_FD(BPF_REG_1, 0),
3754 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3757 BPF_MOV64_IMM(BPF_REG_3,
3758 offsetof(struct test_val, foo)),
3759 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3760 BPF_MOV64_IMM(BPF_REG_2, -8),
3761 BPF_MOV64_IMM(BPF_REG_3, 0),
3762 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3763 BPF_EXIT_INSN(),
3764 },
3765 .fixup_map2 = { 3 },
3766 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3767 .result = REJECT,
3768 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3769 },
3770 {
3771 "helper access to adjusted map (via const reg): negative range (< adjustment)",
3772 .insns = {
3773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3775 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3776 BPF_LD_MAP_FD(BPF_REG_1, 0),
3777 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3780 BPF_MOV64_IMM(BPF_REG_3,
3781 offsetof(struct test_val, foo)),
3782 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3783 BPF_MOV64_IMM(BPF_REG_2, -1),
3784 BPF_MOV64_IMM(BPF_REG_3, 0),
3785 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3786 BPF_EXIT_INSN(),
3787 },
3788 .fixup_map2 = { 3 },
3789 .errstr = "R1 min value is outside of the array range",
3790 .result = REJECT,
3791 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3792 },
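	/* Here the offset added to the map value pointer is loaded from map
	 * data itself and is only acceptable once an upper bound (JGT against
	 * offsetof(struct test_val, foo)) has been established; a missing or
	 * too-large bound must be rejected.
	 */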
3793 {
3794 "helper access to adjusted map (via variable): full range",
3795 .insns = {
3796 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3798 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3799 BPF_LD_MAP_FD(BPF_REG_1, 0),
3800 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3802 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3803 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3804 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3805 offsetof(struct test_val, foo), 4),
3806 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3807 BPF_MOV64_IMM(BPF_REG_2,
3808 sizeof(struct test_val) -
3809 offsetof(struct test_val, foo)),
3810 BPF_MOV64_IMM(BPF_REG_3, 0),
3811 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3812 BPF_EXIT_INSN(),
3813 },
3814 .fixup_map2 = { 3 },
3815 .result = ACCEPT,
3816 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3817 },
3818 {
3819 "helper access to adjusted map (via variable): partial range",
3820 .insns = {
3821 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3823 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3824 BPF_LD_MAP_FD(BPF_REG_1, 0),
3825 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3828 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3829 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3830 offsetof(struct test_val, foo), 4),
3831 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3832 BPF_MOV64_IMM(BPF_REG_2, 8),
3833 BPF_MOV64_IMM(BPF_REG_3, 0),
3834 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3835 BPF_EXIT_INSN(),
3836 },
3837 .fixup_map2 = { 3 },
3838 .result = ACCEPT,
3839 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3840 },
3841 {
3842 "helper access to adjusted map (via variable): empty range",
3843 .insns = {
3844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3846 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3847 BPF_LD_MAP_FD(BPF_REG_1, 0),
3848 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3850 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3851 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3852 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3853 offsetof(struct test_val, foo), 4),
3854 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3855 BPF_MOV64_IMM(BPF_REG_2, 0),
3856 BPF_MOV64_IMM(BPF_REG_3, 0),
3857 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3858 BPF_EXIT_INSN(),
3859 },
3860 .fixup_map2 = { 3 },
3861 .errstr = "R1 min value is outside of the array range",
3862 .result = REJECT,
3863 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3864 },
3865 {
3866 "helper access to adjusted map (via variable): no max check",
3867 .insns = {
3868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3870 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3871 BPF_LD_MAP_FD(BPF_REG_1, 0),
3872 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3876 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3877 BPF_MOV64_IMM(BPF_REG_2, 0),
3878 BPF_MOV64_IMM(BPF_REG_3, 0),
3879 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3880 BPF_EXIT_INSN(),
3881 },
3882 .fixup_map2 = { 3 },
3883 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
3884 .result = REJECT,
3885 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3886 },
3887 {
3888 "helper access to adjusted map (via variable): wrong max check",
3889 .insns = {
3890 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3892 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3893 BPF_LD_MAP_FD(BPF_REG_1, 0),
3894 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3895 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3896 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3898 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3899 offsetof(struct test_val, foo), 4),
3900 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3901 BPF_MOV64_IMM(BPF_REG_2,
3902 sizeof(struct test_val) -
3903 offsetof(struct test_val, foo) + 1),
3904 BPF_MOV64_IMM(BPF_REG_3, 0),
3905 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3906 BPF_EXIT_INSN(),
3907 },
3908 .fixup_map2 = { 3 },
3909 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
3910 .result = REJECT,
3911 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3912 },
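	/* The two spilling tests check that a map value pointer (plain and
	 * offset-adjusted) keeps its type when spilled to the stack and
	 * filled back, so the later store through R3 is still allowed.
	 */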
3913	{
3914 "map element value is preserved across register spilling",
3915 .insns = {
3916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3918 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3919 BPF_LD_MAP_FD(BPF_REG_1, 0),
3920 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3922 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3925 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3926 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3927 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3928 BPF_EXIT_INSN(),
3929 },
3930 .fixup_map2 = { 3 },
3931 .errstr_unpriv = "R0 leaks addr",
3932 .result = ACCEPT,
3933 .result_unpriv = REJECT,
3934 },
3935 {
3936 "map element value (adjusted) is preserved across register spilling",
3937 .insns = {
3938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3940 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3941 BPF_LD_MAP_FD(BPF_REG_1, 0),
3942 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
3945 offsetof(struct test_val, foo)),
3946 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3949 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3950 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3951 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3952 BPF_EXIT_INSN(),
3953 },
3954 .fixup_map2 = { 3 },
3955 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3956 .result = ACCEPT,
3957 .result_unpriv = REJECT,
3958 },
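	/* "helper access to variable memory" tests: the size argument of
	 * bpf_probe_read()/bpf_csum_diff() is variable and must be bounded
	 * either by masking (AND) or by explicit JMP comparisons; a zero
	 * size is only tolerated together with a NULL pointer, and the stack
	 * or map memory passed in must be fully initialized.
	 */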
3959	{
3960 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
3961 .insns = {
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
3964 BPF_MOV64_IMM(BPF_REG_0, 0),
3965 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
3966 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
3967 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
3968 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
3969 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
3970 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
3971 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
3972 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3973 BPF_MOV64_IMM(BPF_REG_2, 16),
3974 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
3975 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
3976 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
3977 BPF_MOV64_IMM(BPF_REG_4, 0),
3978 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
3979 BPF_MOV64_IMM(BPF_REG_3, 0),
3980 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3981 BPF_MOV64_IMM(BPF_REG_0, 0),
3982 BPF_EXIT_INSN(),
3983 },
3984 .result = ACCEPT,
3985 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3986 },
3987 {
3988 "helper access to variable memory: stack, bitwise AND, zero included",
3989 .insns = {
3990 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
3992 BPF_MOV64_IMM(BPF_REG_2, 16),
3993 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
3994 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
3995 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
3996 BPF_MOV64_IMM(BPF_REG_3, 0),
3997 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3998 BPF_EXIT_INSN(),
3999 },
4000 .errstr = "invalid stack type R1 off=-64 access_size=0",
4001 .result = REJECT,
4002 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4003 },
4004 {
4005 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4006 .insns = {
4007 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4009 BPF_MOV64_IMM(BPF_REG_2, 16),
4010 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4011 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4012 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4013 BPF_MOV64_IMM(BPF_REG_4, 0),
4014 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4015 BPF_MOV64_IMM(BPF_REG_3, 0),
4016 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4017 BPF_MOV64_IMM(BPF_REG_0, 0),
4018 BPF_EXIT_INSN(),
4019 },
4020 .errstr = "invalid stack type R1 off=-64 access_size=65",
4021 .result = REJECT,
4022 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4023 },
4024 {
4025 "helper access to variable memory: stack, JMP, correct bounds",
4026 .insns = {
4027 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4029 BPF_MOV64_IMM(BPF_REG_0, 0),
4030 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4031 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4032 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4033 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4034 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4035 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4036 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4037 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4038 BPF_MOV64_IMM(BPF_REG_2, 16),
4039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4040 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4041 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4042 BPF_MOV64_IMM(BPF_REG_4, 0),
4043 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4044 BPF_MOV64_IMM(BPF_REG_3, 0),
4045 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4046 BPF_MOV64_IMM(BPF_REG_0, 0),
4047 BPF_EXIT_INSN(),
4048 },
4049 .result = ACCEPT,
4050 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4051 },
4052 {
4053 "helper access to variable memory: stack, JMP (signed), correct bounds",
4054 .insns = {
4055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4057 BPF_MOV64_IMM(BPF_REG_0, 0),
4058 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4062 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4063 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4064 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4065 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4066 BPF_MOV64_IMM(BPF_REG_2, 16),
4067 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4068 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4069 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4070 BPF_MOV64_IMM(BPF_REG_4, 0),
4071 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4072 BPF_MOV64_IMM(BPF_REG_3, 0),
4073 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4074 BPF_MOV64_IMM(BPF_REG_0, 0),
4075 BPF_EXIT_INSN(),
4076 },
4077 .result = ACCEPT,
4078 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4079 },
4080 {
4081 "helper access to variable memory: stack, JMP, bounds + offset",
4082 .insns = {
4083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4085 BPF_MOV64_IMM(BPF_REG_2, 16),
4086 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4087 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4088 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4089 BPF_MOV64_IMM(BPF_REG_4, 0),
4090 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4092 BPF_MOV64_IMM(BPF_REG_3, 0),
4093 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4094 BPF_MOV64_IMM(BPF_REG_0, 0),
4095 BPF_EXIT_INSN(),
4096 },
4097 .errstr = "invalid stack type R1 off=-64 access_size=65",
4098 .result = REJECT,
4099 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4100 },
4101 {
4102 "helper access to variable memory: stack, JMP, wrong max",
4103 .insns = {
4104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4106 BPF_MOV64_IMM(BPF_REG_2, 16),
4107 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4108 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4109 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4110 BPF_MOV64_IMM(BPF_REG_4, 0),
4111 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4112 BPF_MOV64_IMM(BPF_REG_3, 0),
4113 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4114 BPF_MOV64_IMM(BPF_REG_0, 0),
4115 BPF_EXIT_INSN(),
4116 },
4117 .errstr = "invalid stack type R1 off=-64 access_size=65",
4118 .result = REJECT,
4119 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4120 },
4121 {
4122 "helper access to variable memory: stack, JMP, no max check",
4123 .insns = {
4124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4126 BPF_MOV64_IMM(BPF_REG_2, 16),
4127 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4128 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4129 BPF_MOV64_IMM(BPF_REG_4, 0),
4130 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4131 BPF_MOV64_IMM(BPF_REG_3, 0),
4132 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4133 BPF_MOV64_IMM(BPF_REG_0, 0),
4134 BPF_EXIT_INSN(),
4135 },
4136 .errstr = "R2 unbounded memory access",
4137 .result = REJECT,
4138 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4139 },
4140 {
4141 "helper access to variable memory: stack, JMP, no min check",
4142 .insns = {
4143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4145 BPF_MOV64_IMM(BPF_REG_2, 16),
4146 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4147 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4148 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4149 BPF_MOV64_IMM(BPF_REG_3, 0),
4150 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4151 BPF_MOV64_IMM(BPF_REG_0, 0),
4152 BPF_EXIT_INSN(),
4153 },
4154 .errstr = "invalid stack type R1 off=-64 access_size=0",
4155 .result = REJECT,
4156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4157 },
4158 {
4159 "helper access to variable memory: stack, JMP (signed), no min check",
4160 .insns = {
4161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4163 BPF_MOV64_IMM(BPF_REG_2, 16),
4164 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4165 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4166 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4167 BPF_MOV64_IMM(BPF_REG_3, 0),
4168 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4169 BPF_MOV64_IMM(BPF_REG_0, 0),
4170 BPF_EXIT_INSN(),
4171 },
4172 .errstr = "R2 min value is negative",
4173 .result = REJECT,
4174 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4175 },
4176 {
4177 "helper access to variable memory: map, JMP, correct bounds",
4178 .insns = {
4179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4181 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4182 BPF_LD_MAP_FD(BPF_REG_1, 0),
4183 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4186 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4187 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4188 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4189 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4190 sizeof(struct test_val), 4),
4191 BPF_MOV64_IMM(BPF_REG_4, 0),
4192 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4193 BPF_MOV64_IMM(BPF_REG_3, 0),
4194 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4195 BPF_MOV64_IMM(BPF_REG_0, 0),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map2 = { 3 },
4199 .result = ACCEPT,
4200 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4201 },
4202 {
4203 "helper access to variable memory: map, JMP, wrong max",
4204 .insns = {
4205 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4207 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4208 BPF_LD_MAP_FD(BPF_REG_1, 0),
4209 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4212 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4213 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4214 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4215 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4216 sizeof(struct test_val) + 1, 4),
4217 BPF_MOV64_IMM(BPF_REG_4, 0),
4218 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4219 BPF_MOV64_IMM(BPF_REG_3, 0),
4220 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4221 BPF_MOV64_IMM(BPF_REG_0, 0),
4222 BPF_EXIT_INSN(),
4223 },
4224 .fixup_map2 = { 3 },
4225 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4226 .result = REJECT,
4227 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4228 },
4229 {
4230 "helper access to variable memory: map adjusted, JMP, correct bounds",
4231 .insns = {
4232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4234 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4235 BPF_LD_MAP_FD(BPF_REG_1, 0),
4236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4240 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4241 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4242 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4243 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4244 sizeof(struct test_val) - 20, 4),
4245 BPF_MOV64_IMM(BPF_REG_4, 0),
4246 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4247 BPF_MOV64_IMM(BPF_REG_3, 0),
4248 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4249 BPF_MOV64_IMM(BPF_REG_0, 0),
4250 BPF_EXIT_INSN(),
4251 },
4252 .fixup_map2 = { 3 },
4253 .result = ACCEPT,
4254 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4255 },
4256 {
4257 "helper access to variable memory: map adjusted, JMP, wrong max",
4258 .insns = {
4259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4261 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4262 BPF_LD_MAP_FD(BPF_REG_1, 0),
4263 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4267 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4268 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4269 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4270 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4271 sizeof(struct test_val) - 19, 4),
4272 BPF_MOV64_IMM(BPF_REG_4, 0),
4273 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4274 BPF_MOV64_IMM(BPF_REG_3, 0),
4275 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4276 BPF_MOV64_IMM(BPF_REG_0, 0),
4277 BPF_EXIT_INSN(),
4278 },
4279 .fixup_map2 = { 3 },
4280 .errstr = "R1 min value is outside of the array range",
4281 .result = REJECT,
4282 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4283 },
4284 {
4285 "helper access to variable memory: size > 0 not allowed on NULL",
4286 .insns = {
4287 BPF_MOV64_IMM(BPF_REG_1, 0),
4288 BPF_MOV64_IMM(BPF_REG_2, 0),
4289	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4290	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4291	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4292 BPF_MOV64_IMM(BPF_REG_3, 0),
4293 BPF_MOV64_IMM(BPF_REG_4, 0),
4294 BPF_MOV64_IMM(BPF_REG_5, 0),
4295 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4296 BPF_EXIT_INSN(),
4297 },
4298 .errstr = "R1 type=imm expected=fp",
4299 .result = REJECT,
4300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4301 },
4302 {
4303 "helper access to variable memory: size = 0 not allowed on != NULL",
4304 .insns = {
4305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4307 BPF_MOV64_IMM(BPF_REG_2, 0),
4308 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4309 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4310 BPF_MOV64_IMM(BPF_REG_3, 0),
4311 BPF_MOV64_IMM(BPF_REG_4, 0),
4312 BPF_MOV64_IMM(BPF_REG_5, 0),
4313 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4314 BPF_EXIT_INSN(),
4315 },
4316 .errstr = "invalid stack type R1 off=-8 access_size=0",
4317 .result = REJECT,
4318 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4319 },
4320 {
4321 "helper access to variable memory: 8 bytes leak",
4322 .insns = {
4323 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4325 BPF_MOV64_IMM(BPF_REG_0, 0),
4326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4329 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4330 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4331 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4333 BPF_MOV64_IMM(BPF_REG_2, 0),
4334	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4335	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4336	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4337 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4338 BPF_MOV64_IMM(BPF_REG_3, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4340 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4341 BPF_EXIT_INSN(),
4342 },
4343 .errstr = "invalid indirect read from stack off -64+32 size 64",
4344 .result = REJECT,
4345 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4346 },
4347 {
4348 "helper access to variable memory: 8 bytes no leak (init memory)",
4349 .insns = {
4350 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4351 BPF_MOV64_IMM(BPF_REG_0, 0),
4352 BPF_MOV64_IMM(BPF_REG_0, 0),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4354 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4355 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4356 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4357 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4359 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4362 BPF_MOV64_IMM(BPF_REG_2, 0),
4363 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4365 BPF_MOV64_IMM(BPF_REG_3, 0),
4366 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4367 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4368 BPF_EXIT_INSN(),
4369 },
4370 .result = ACCEPT,
4371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4372 },
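	/* The last two tests use AND with a negative immediate and a
	 * MOD/AND/RSH/MUL sequence to construct an index whose minimum value
	 * the verifier cannot prove non-negative, so both map accesses must
	 * be rejected.
	 */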
4373	{
4374 "invalid and of negative number",
4375 .insns = {
4376 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4377 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4379 BPF_LD_MAP_FD(BPF_REG_1, 0),
4380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4381 BPF_FUNC_map_lookup_elem),
4382 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4383 BPF_MOV64_IMM(BPF_REG_1, 6),
4384 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4385 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4386 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4387 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4388 offsetof(struct test_val, foo)),
4389 BPF_EXIT_INSN(),
4390 },
4391 .fixup_map2 = { 3 },
4392 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4393 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4394 .result = REJECT,
4395 .result_unpriv = REJECT,
4396 },
4397 {
4398 "invalid range check",
4399 .insns = {
4400 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1, 0),
4404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4405 BPF_FUNC_map_lookup_elem),
4406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4407 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4408 BPF_MOV64_IMM(BPF_REG_9, 1),
4409 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4410 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4411 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4412 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4413 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4414 BPF_MOV32_IMM(BPF_REG_3, 1),
4415 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4416 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4417 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4418 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4419 BPF_MOV64_REG(BPF_REG_0, 0),
4420 BPF_EXIT_INSN(),
4421 },
4422 .fixup_map2 = { 3 },
4423 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4424 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4425 .result = REJECT,
4426 .result_unpriv = REJECT,
4427 }
4428};
4429
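/* Scan backwards from MAX_INSNS for the last non-zero instruction to
 * determine how many instructions of insns[] actually belong to a test.
 */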
4430static int probe_filter_length(const struct bpf_insn *fp)
4431{
4432	int len;
4433
4434 for (len = MAX_INSNS - 1; len > 0; --len)
4435 if (fp[len].code != 0 || fp[len].imm != 0)
4436 break;
4437	return len + 1;
4438}
4439
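/* Create a small hash map used to patch the BPF_LD_MAP_FD test
 * instructions; only the value size matters for the verifier checks
 * exercised here.
 */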
4440static int create_map(uint32_t size_value, uint32_t max_elem)
4441{
4442	int fd;
4443
4444	fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
4445 size_value, max_elem, BPF_F_NO_PREALLOC);
4446 if (fd < 0)
4447 printf("Failed to create hash map '%s'!\n", strerror(errno));
4448
4449	return fd;
4450}
4451
4452static int create_prog_array(void)
4453{
4454	int fd;
4455
4456	fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
4457 sizeof(int), 4, 0);
4458 if (fd < 0)
4459 printf("Failed to create prog array '%s'!\n", strerror(errno));
4460
4461	return fd;
4462}
4463
4464static char bpf_vlog[32768];
4465
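/* Patch the dummy file descriptors in the test's BPF_LD_MAP_FD
 * instructions: each index listed in fixup_map1/fixup_map2/fixup_prog
 * names an instruction whose imm field is rewritten with a freshly
 * created map or prog-array fd.
 */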
4466static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4467 int *fd_f1, int *fd_f2, int *fd_f3)
4468{
4469	int *fixup_map1 = test->fixup_map1;
4470 int *fixup_map2 = test->fixup_map2;
4471 int *fixup_prog = test->fixup_prog;
4472
4473	/* Allocating hash tables with one element is fine here, since we
4474	 * only exercise the verifier and never do a runtime lookup, so the
4475	 * only thing that really matters is the value size.
4476 */
4477 if (*fixup_map1) {
4478 *fd_f1 = create_map(sizeof(long long), 1);
4479 do {
4480 prog[*fixup_map1].imm = *fd_f1;
4481 fixup_map1++;
4482 } while (*fixup_map1);
4483 }
4484
4485 if (*fixup_map2) {
4486 *fd_f2 = create_map(sizeof(struct test_val), 1);
4487 do {
4488 prog[*fixup_map2].imm = *fd_f2;
4489 fixup_map2++;
4490 } while (*fixup_map2);
4491 }
4492
4493 if (*fixup_prog) {
4494 *fd_f3 = create_prog_array();
4495 do {
4496 prog[*fixup_prog].imm = *fd_f3;
4497 fixup_prog++;
4498 } while (*fixup_prog);
4499 }
4500}
4501
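/* Load a single test program and compare the outcome (and, on failure,
 * the verifier log) against the expected result; when running without
 * root, the unprivileged expectations are used instead where given.
 */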
4502static void do_test_single(struct bpf_test *test, bool unpriv,
4503 int *passes, int *errors)
4504{
4505 struct bpf_insn *prog = test->insns;
4506 int prog_len = probe_filter_length(prog);
4507 int prog_type = test->prog_type;
4508 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4509 int fd_prog, expected_ret;
4510 const char *expected_err;
4511
4512 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
4513
4514 fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
4515 prog, prog_len * sizeof(struct bpf_insn),
4516 "GPL", bpf_vlog, sizeof(bpf_vlog));
4517
4518 expected_ret = unpriv && test->result_unpriv != UNDEF ?
4519 test->result_unpriv : test->result;
4520 expected_err = unpriv && test->errstr_unpriv ?
4521 test->errstr_unpriv : test->errstr;
4522 if (expected_ret == ACCEPT) {
4523 if (fd_prog < 0) {
4524 printf("FAIL\nFailed to load prog '%s'!\n",
4525 strerror(errno));
4526 goto fail_log;
4527 }
4528 } else {
4529 if (fd_prog >= 0) {
4530			printf("FAIL\nUnexpectedly succeeded in loading program!\n");
4531 goto fail_log;
4532 }
4533 if (!strstr(bpf_vlog, expected_err)) {
4534 printf("FAIL\nUnexpected error message!\n");
4535 goto fail_log;
4536 }
4537 }
4538
4539 (*passes)++;
4540 printf("OK\n");
4541close_fds:
4542 close(fd_prog);
4543 close(fd_f1);
4544 close(fd_f2);
4545 close(fd_f3);
4546 sched_yield();
4547 return;
4548fail_log:
4549 (*errors)++;
4550 printf("%s", bpf_vlog);
4551 goto close_fds;
4552}
4553
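/* Run tests[from..to), skipping programs whose type cannot be loaded
 * without privileges when running as non-root.
 */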
4554static int do_test(bool unpriv, unsigned int from, unsigned int to)
4555{
4556 int i, passes = 0, errors = 0;
4557
4558 for (i = from; i < to; i++) {
4559 struct bpf_test *test = &tests[i];
4560
4561 /* Program types that are not supported by non-root we
4562 * skip right away.
4563 */
4564 if (unpriv && test->prog_type)
4565 continue;
4566
4567 printf("#%d %s ", i, test->descr);
4568 do_test_single(test, unpriv, &passes, &errors);
4569 }
4570
4571 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
4572 return errors ? -errors : 0;
4573}
4574
4575int main(int argc, char **argv)
4576{
4577 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
4578 struct rlimit rlim = { 1 << 20, 1 << 20 };
4579 unsigned int from = 0, to = ARRAY_SIZE(tests);
4580	bool unpriv = geteuid() != 0;
4581
4582	if (argc == 3) {
4583 unsigned int l = atoi(argv[argc - 2]);
4584 unsigned int u = atoi(argv[argc - 1]);
4585
4586		if (l < to && u < to) {
4587 from = l;
4588 to = u + 1;
4589		}
4590	} else if (argc == 2) {
4591 unsigned int t = atoi(argv[argc - 1]);
4592
4593		if (t < to) {
4594 from = t;
4595 to = t + 1;
4596		}
4597	}
4598
4599	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
4600	return do_test(unpriv, from, to);
4601}