blob: 13341700930c489cb6bb0d4e4c9d539ce8975209 [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011#include <asm/types.h>
12#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010013#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070014#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010015#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070016#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070017#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070018#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070019#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070020#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020021#include <sched.h>
22
Mickaël Salaünd02d8982017-02-10 00:21:37 +010023#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070024#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070025
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020026#include <linux/unistd.h>
27#include <linux/filter.h>
28#include <linux/bpf_perf_event.h>
29#include <linux/bpf.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070030
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010031#include <bpf/bpf.h>
32
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020033#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020041#include "../../../include/linux/filter.h"
42
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020043#ifndef ARRAY_SIZE
44# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
45#endif
46
47#define MAX_INSNS 512
48#define MAX_FIXUPS 8
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070049#define MAX_NR_MAPS 4
Alexei Starovoitovbf508872015-10-07 22:23:23 -070050
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020051#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
Daniel Borkmann614d0d72017-05-25 01:05:09 +020052#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020053
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070054struct bpf_test {
55 const char *descr;
56 struct bpf_insn insns[MAX_INSNS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020057 int fixup_map1[MAX_FIXUPS];
58 int fixup_map2[MAX_FIXUPS];
59 int fixup_prog[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070060 int fixup_map_in_map[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070061 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070062 const char *errstr_unpriv;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070063 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070064 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070065 ACCEPT,
66 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070067 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070068 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020069 uint8_t flags;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070070};
71
Josef Bacik48461132016-09-28 10:54:32 -040072/* Note we want this to be 64 bit aligned so that the end of our array is
73 * actually the end of the structure.
74 */
75#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040076
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020077struct test_val {
78 unsigned int index;
79 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040080};
81
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070082static struct bpf_test tests[] = {
83 {
84 "add+sub+mul",
85 .insns = {
86 BPF_MOV64_IMM(BPF_REG_1, 1),
87 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
88 BPF_MOV64_IMM(BPF_REG_2, 3),
89 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
90 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
91 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
92 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
93 BPF_EXIT_INSN(),
94 },
95 .result = ACCEPT,
96 },
97 {
98 "unreachable",
99 .insns = {
100 BPF_EXIT_INSN(),
101 BPF_EXIT_INSN(),
102 },
103 .errstr = "unreachable",
104 .result = REJECT,
105 },
106 {
107 "unreachable2",
108 .insns = {
109 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
110 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
111 BPF_EXIT_INSN(),
112 },
113 .errstr = "unreachable",
114 .result = REJECT,
115 },
116 {
117 "out of range jump",
118 .insns = {
119 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
120 BPF_EXIT_INSN(),
121 },
122 .errstr = "jump out of range",
123 .result = REJECT,
124 },
125 {
126 "out of range jump2",
127 .insns = {
128 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
129 BPF_EXIT_INSN(),
130 },
131 .errstr = "jump out of range",
132 .result = REJECT,
133 },
134 {
135 "test1 ld_imm64",
136 .insns = {
137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
138 BPF_LD_IMM64(BPF_REG_0, 0),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 1),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_MOV64_IMM(BPF_REG_0, 2),
143 BPF_EXIT_INSN(),
144 },
145 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700146 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700147 .result = REJECT,
148 },
149 {
150 "test2 ld_imm64",
151 .insns = {
152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
153 BPF_LD_IMM64(BPF_REG_0, 0),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 1),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_EXIT_INSN(),
158 },
159 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700160 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700161 .result = REJECT,
162 },
163 {
164 "test3 ld_imm64",
165 .insns = {
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
167 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
168 BPF_LD_IMM64(BPF_REG_0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 1),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_EXIT_INSN(),
173 },
174 .errstr = "invalid bpf_ld_imm64 insn",
175 .result = REJECT,
176 },
177 {
178 "test4 ld_imm64",
179 .insns = {
180 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
181 BPF_EXIT_INSN(),
182 },
183 .errstr = "invalid bpf_ld_imm64 insn",
184 .result = REJECT,
185 },
186 {
187 "test5 ld_imm64",
188 .insns = {
189 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
190 },
191 .errstr = "invalid bpf_ld_imm64 insn",
192 .result = REJECT,
193 },
194 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200195 "test6 ld_imm64",
196 .insns = {
197 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
198 BPF_RAW_INSN(0, 0, 0, 0, 0),
199 BPF_EXIT_INSN(),
200 },
201 .result = ACCEPT,
202 },
203 {
204 "test7 ld_imm64",
205 .insns = {
206 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
207 BPF_RAW_INSN(0, 0, 0, 0, 1),
208 BPF_EXIT_INSN(),
209 },
210 .result = ACCEPT,
211 },
212 {
213 "test8 ld_imm64",
214 .insns = {
215 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
216 BPF_RAW_INSN(0, 0, 0, 0, 1),
217 BPF_EXIT_INSN(),
218 },
219 .errstr = "uses reserved fields",
220 .result = REJECT,
221 },
222 {
223 "test9 ld_imm64",
224 .insns = {
225 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
226 BPF_RAW_INSN(0, 0, 0, 1, 1),
227 BPF_EXIT_INSN(),
228 },
229 .errstr = "invalid bpf_ld_imm64 insn",
230 .result = REJECT,
231 },
232 {
233 "test10 ld_imm64",
234 .insns = {
235 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
236 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
237 BPF_EXIT_INSN(),
238 },
239 .errstr = "invalid bpf_ld_imm64 insn",
240 .result = REJECT,
241 },
242 {
243 "test11 ld_imm64",
244 .insns = {
245 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
246 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
247 BPF_EXIT_INSN(),
248 },
249 .errstr = "invalid bpf_ld_imm64 insn",
250 .result = REJECT,
251 },
252 {
253 "test12 ld_imm64",
254 .insns = {
255 BPF_MOV64_IMM(BPF_REG_1, 0),
256 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
257 BPF_RAW_INSN(0, 0, 0, 0, 1),
258 BPF_EXIT_INSN(),
259 },
260 .errstr = "not pointing to valid bpf_map",
261 .result = REJECT,
262 },
263 {
264 "test13 ld_imm64",
265 .insns = {
266 BPF_MOV64_IMM(BPF_REG_1, 0),
267 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
268 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
269 BPF_EXIT_INSN(),
270 },
271 .errstr = "invalid bpf_ld_imm64 insn",
272 .result = REJECT,
273 },
274 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700275 "no bpf_exit",
276 .insns = {
277 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
278 },
279 .errstr = "jump out of range",
280 .result = REJECT,
281 },
282 {
283 "loop (back-edge)",
284 .insns = {
285 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
286 BPF_EXIT_INSN(),
287 },
288 .errstr = "back-edge",
289 .result = REJECT,
290 },
291 {
292 "loop2 (back-edge)",
293 .insns = {
294 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
296 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
297 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
298 BPF_EXIT_INSN(),
299 },
300 .errstr = "back-edge",
301 .result = REJECT,
302 },
303 {
304 "conditional loop",
305 .insns = {
306 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
308 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
309 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
310 BPF_EXIT_INSN(),
311 },
312 .errstr = "back-edge",
313 .result = REJECT,
314 },
315 {
316 "read uninitialized register",
317 .insns = {
318 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
319 BPF_EXIT_INSN(),
320 },
321 .errstr = "R2 !read_ok",
322 .result = REJECT,
323 },
324 {
325 "read invalid register",
326 .insns = {
327 BPF_MOV64_REG(BPF_REG_0, -1),
328 BPF_EXIT_INSN(),
329 },
330 .errstr = "R15 is invalid",
331 .result = REJECT,
332 },
333 {
334 "program doesn't init R0 before exit",
335 .insns = {
336 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
337 BPF_EXIT_INSN(),
338 },
339 .errstr = "R0 !read_ok",
340 .result = REJECT,
341 },
342 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700343 "program doesn't init R0 before exit in all branches",
344 .insns = {
345 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
346 BPF_MOV64_IMM(BPF_REG_0, 1),
347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
348 BPF_EXIT_INSN(),
349 },
350 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700351 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700352 .result = REJECT,
353 },
354 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700355 "stack out of bounds",
356 .insns = {
357 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
358 BPF_EXIT_INSN(),
359 },
360 .errstr = "invalid stack",
361 .result = REJECT,
362 },
363 {
364 "invalid call insn1",
365 .insns = {
366 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
367 BPF_EXIT_INSN(),
368 },
369 .errstr = "BPF_CALL uses reserved",
370 .result = REJECT,
371 },
372 {
373 "invalid call insn2",
374 .insns = {
375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
376 BPF_EXIT_INSN(),
377 },
378 .errstr = "BPF_CALL uses reserved",
379 .result = REJECT,
380 },
381 {
382 "invalid function call",
383 .insns = {
384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
385 BPF_EXIT_INSN(),
386 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100387 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700388 .result = REJECT,
389 },
390 {
391 "uninitialized stack1",
392 .insns = {
393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
395 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
397 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700398 BPF_EXIT_INSN(),
399 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200400 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700401 .errstr = "invalid indirect read from stack",
402 .result = REJECT,
403 },
404 {
405 "uninitialized stack2",
406 .insns = {
407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
408 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
409 BPF_EXIT_INSN(),
410 },
411 .errstr = "invalid read from stack",
412 .result = REJECT,
413 },
414 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200415 "invalid fp arithmetic",
416 /* If this gets ever changed, make sure JITs can deal with it. */
417 .insns = {
418 BPF_MOV64_IMM(BPF_REG_0, 0),
419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
420 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
421 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
422 BPF_EXIT_INSN(),
423 },
424 .errstr_unpriv = "R1 pointer arithmetic",
425 .result_unpriv = REJECT,
426 .errstr = "R1 invalid mem access",
427 .result = REJECT,
428 },
429 {
430 "non-invalid fp arithmetic",
431 .insns = {
432 BPF_MOV64_IMM(BPF_REG_0, 0),
433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
434 BPF_EXIT_INSN(),
435 },
436 .result = ACCEPT,
437 },
438 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200439 "invalid argument register",
440 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
442 BPF_FUNC_get_cgroup_classid),
443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
444 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200445 BPF_EXIT_INSN(),
446 },
447 .errstr = "R1 !read_ok",
448 .result = REJECT,
449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
450 },
451 {
452 "non-invalid argument register",
453 .insns = {
454 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
456 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200457 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
459 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200460 BPF_EXIT_INSN(),
461 },
462 .result = ACCEPT,
463 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
464 },
465 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700466 "check valid spill/fill",
467 .insns = {
468 /* spill R1(ctx) into stack */
469 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700470 /* fill it back into R2 */
471 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700472 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100473 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
474 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700475 BPF_EXIT_INSN(),
476 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700477 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700478 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700479 .result_unpriv = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700480 },
481 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200482 "check valid spill/fill, skb mark",
483 .insns = {
484 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
485 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
486 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
488 offsetof(struct __sk_buff, mark)),
489 BPF_EXIT_INSN(),
490 },
491 .result = ACCEPT,
492 .result_unpriv = ACCEPT,
493 },
494 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700495 "check corrupted spill/fill",
496 .insns = {
497 /* spill R1(ctx) into stack */
498 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700499 /* mess up with R1 pointer on stack */
500 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700501 /* fill back into R0 should fail */
502 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700503 BPF_EXIT_INSN(),
504 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700505 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700506 .errstr = "corrupted spill",
507 .result = REJECT,
508 },
509 {
510 "invalid src register in STX",
511 .insns = {
512 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
513 BPF_EXIT_INSN(),
514 },
515 .errstr = "R15 is invalid",
516 .result = REJECT,
517 },
518 {
519 "invalid dst register in STX",
520 .insns = {
521 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
522 BPF_EXIT_INSN(),
523 },
524 .errstr = "R14 is invalid",
525 .result = REJECT,
526 },
527 {
528 "invalid dst register in ST",
529 .insns = {
530 BPF_ST_MEM(BPF_B, 14, -1, -1),
531 BPF_EXIT_INSN(),
532 },
533 .errstr = "R14 is invalid",
534 .result = REJECT,
535 },
536 {
537 "invalid src register in LDX",
538 .insns = {
539 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
540 BPF_EXIT_INSN(),
541 },
542 .errstr = "R12 is invalid",
543 .result = REJECT,
544 },
545 {
546 "invalid dst register in LDX",
547 .insns = {
548 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
549 BPF_EXIT_INSN(),
550 },
551 .errstr = "R11 is invalid",
552 .result = REJECT,
553 },
554 {
555 "junk insn",
556 .insns = {
557 BPF_RAW_INSN(0, 0, 0, 0, 0),
558 BPF_EXIT_INSN(),
559 },
560 .errstr = "invalid BPF_LD_IMM",
561 .result = REJECT,
562 },
563 {
564 "junk insn2",
565 .insns = {
566 BPF_RAW_INSN(1, 0, 0, 0, 0),
567 BPF_EXIT_INSN(),
568 },
569 .errstr = "BPF_LDX uses reserved fields",
570 .result = REJECT,
571 },
572 {
573 "junk insn3",
574 .insns = {
575 BPF_RAW_INSN(-1, 0, 0, 0, 0),
576 BPF_EXIT_INSN(),
577 },
578 .errstr = "invalid BPF_ALU opcode f0",
579 .result = REJECT,
580 },
581 {
582 "junk insn4",
583 .insns = {
584 BPF_RAW_INSN(-1, -1, -1, -1, -1),
585 BPF_EXIT_INSN(),
586 },
587 .errstr = "invalid BPF_ALU opcode f0",
588 .result = REJECT,
589 },
590 {
591 "junk insn5",
592 .insns = {
593 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
594 BPF_EXIT_INSN(),
595 },
596 .errstr = "BPF_ALU uses reserved fields",
597 .result = REJECT,
598 },
599 {
600 "misaligned read from stack",
601 .insns = {
602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
603 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
604 BPF_EXIT_INSN(),
605 },
606 .errstr = "misaligned access",
607 .result = REJECT,
608 },
609 {
610 "invalid map_fd for function call",
611 .insns = {
612 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
613 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
615 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
617 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700618 BPF_EXIT_INSN(),
619 },
620 .errstr = "fd 0 is not pointing to valid bpf_map",
621 .result = REJECT,
622 },
623 {
624 "don't check return value before access",
625 .insns = {
626 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
627 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
629 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200630 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
631 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700632 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
633 BPF_EXIT_INSN(),
634 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200635 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700636 .errstr = "R0 invalid mem access 'map_value_or_null'",
637 .result = REJECT,
638 },
639 {
640 "access memory with incorrect alignment",
641 .insns = {
642 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
645 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
647 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
649 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
650 BPF_EXIT_INSN(),
651 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200652 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700653 .errstr = "misaligned access",
654 .result = REJECT,
655 },
656 {
657 "sometimes access memory with incorrect alignment",
658 .insns = {
659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
662 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
664 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
666 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
667 BPF_EXIT_INSN(),
668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
669 BPF_EXIT_INSN(),
670 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200671 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700672 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700673 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700674 .result = REJECT,
675 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700676 {
677 "jump test 1",
678 .insns = {
679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
680 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
682 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
684 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
686 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
688 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
690 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
692 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
693 BPF_MOV64_IMM(BPF_REG_0, 0),
694 BPF_EXIT_INSN(),
695 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700696 .errstr_unpriv = "R1 pointer comparison",
697 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700698 .result = ACCEPT,
699 },
700 {
701 "jump test 2",
702 .insns = {
703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
704 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
705 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
706 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
708 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
709 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
711 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
712 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
714 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
715 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
716 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
717 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
718 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
720 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
721 BPF_MOV64_IMM(BPF_REG_0, 0),
722 BPF_EXIT_INSN(),
723 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700724 .errstr_unpriv = "R1 pointer comparison",
725 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700726 .result = ACCEPT,
727 },
728 {
729 "jump test 3",
730 .insns = {
731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
733 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
735 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
737 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
739 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
741 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
743 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
745 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
749 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
751 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
752 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
753 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
755 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
757 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700758 BPF_EXIT_INSN(),
759 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200760 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700761 .errstr_unpriv = "R1 pointer comparison",
762 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700763 .result = ACCEPT,
764 },
765 {
766 "jump test 4",
767 .insns = {
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
769 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
808 BPF_MOV64_IMM(BPF_REG_0, 0),
809 BPF_EXIT_INSN(),
810 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700811 .errstr_unpriv = "R1 pointer comparison",
812 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700813 .result = ACCEPT,
814 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700815 {
816 "jump test 5",
817 .insns = {
818 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
819 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
820 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
821 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
822 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
823 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
824 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
825 BPF_MOV64_IMM(BPF_REG_0, 0),
826 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
827 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
828 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
829 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
830 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
831 BPF_MOV64_IMM(BPF_REG_0, 0),
832 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
833 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
834 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
835 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
836 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
837 BPF_MOV64_IMM(BPF_REG_0, 0),
838 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
839 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
840 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
841 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
842 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
843 BPF_MOV64_IMM(BPF_REG_0, 0),
844 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
845 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
846 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
847 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
848 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
849 BPF_MOV64_IMM(BPF_REG_0, 0),
850 BPF_EXIT_INSN(),
851 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700852 .errstr_unpriv = "R1 pointer comparison",
853 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700854 .result = ACCEPT,
855 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700856 {
857 "access skb fields ok",
858 .insns = {
859 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
860 offsetof(struct __sk_buff, len)),
861 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
862 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
863 offsetof(struct __sk_buff, mark)),
864 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
865 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
866 offsetof(struct __sk_buff, pkt_type)),
867 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
868 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
869 offsetof(struct __sk_buff, queue_mapping)),
870 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -0700871 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
872 offsetof(struct __sk_buff, protocol)),
873 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
874 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
875 offsetof(struct __sk_buff, vlan_present)),
876 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
877 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
878 offsetof(struct __sk_buff, vlan_tci)),
879 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +0200880 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
881 offsetof(struct __sk_buff, napi_id)),
882 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700883 BPF_EXIT_INSN(),
884 },
885 .result = ACCEPT,
886 },
887 {
888 "access skb fields bad1",
889 .insns = {
890 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
891 BPF_EXIT_INSN(),
892 },
893 .errstr = "invalid bpf_context access",
894 .result = REJECT,
895 },
896 {
897 "access skb fields bad2",
898 .insns = {
899 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
900 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
903 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
905 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700906 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
907 BPF_EXIT_INSN(),
908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, pkt_type)),
911 BPF_EXIT_INSN(),
912 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200913 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700914 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700915 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700916 .result = REJECT,
917 },
918 {
919 "access skb fields bad3",
920 .insns = {
921 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
922 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
923 offsetof(struct __sk_buff, pkt_type)),
924 BPF_EXIT_INSN(),
925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
926 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
928 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
930 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700931 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
932 BPF_EXIT_INSN(),
933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
934 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
935 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200936 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700937 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700938 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700939 .result = REJECT,
940 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700941 {
942 "access skb fields bad4",
943 .insns = {
944 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
945 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
946 offsetof(struct __sk_buff, len)),
947 BPF_MOV64_IMM(BPF_REG_0, 0),
948 BPF_EXIT_INSN(),
949 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
950 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
952 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
954 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700955 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
956 BPF_EXIT_INSN(),
957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
958 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
959 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200960 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700961 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700962 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700963 .result = REJECT,
964 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700965 {
966 "check skb->mark is not writeable by sockets",
967 .insns = {
968 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
969 offsetof(struct __sk_buff, mark)),
970 BPF_EXIT_INSN(),
971 },
972 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700973 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700974 .result = REJECT,
975 },
976 {
977 "check skb->tc_index is not writeable by sockets",
978 .insns = {
979 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
980 offsetof(struct __sk_buff, tc_index)),
981 BPF_EXIT_INSN(),
982 },
983 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700984 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700985 .result = REJECT,
986 },
987 {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100988 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -0700989 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +0100990 BPF_MOV64_IMM(BPF_REG_0, 0),
991 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
992 offsetof(struct __sk_buff, cb[0])),
993 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
994 offsetof(struct __sk_buff, cb[0]) + 1),
995 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
996 offsetof(struct __sk_buff, cb[0]) + 2),
997 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
998 offsetof(struct __sk_buff, cb[0]) + 3),
999 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1000 offsetof(struct __sk_buff, cb[1])),
1001 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1002 offsetof(struct __sk_buff, cb[1]) + 1),
1003 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1004 offsetof(struct __sk_buff, cb[1]) + 2),
1005 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1006 offsetof(struct __sk_buff, cb[1]) + 3),
1007 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1008 offsetof(struct __sk_buff, cb[2])),
1009 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1010 offsetof(struct __sk_buff, cb[2]) + 1),
1011 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1012 offsetof(struct __sk_buff, cb[2]) + 2),
1013 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1014 offsetof(struct __sk_buff, cb[2]) + 3),
1015 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1016 offsetof(struct __sk_buff, cb[3])),
1017 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1018 offsetof(struct __sk_buff, cb[3]) + 1),
1019 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1020 offsetof(struct __sk_buff, cb[3]) + 2),
1021 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1022 offsetof(struct __sk_buff, cb[3]) + 3),
1023 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1024 offsetof(struct __sk_buff, cb[4])),
1025 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1026 offsetof(struct __sk_buff, cb[4]) + 1),
1027 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1028 offsetof(struct __sk_buff, cb[4]) + 2),
1029 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1030 offsetof(struct __sk_buff, cb[4]) + 3),
1031 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1032 offsetof(struct __sk_buff, cb[0])),
1033 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1034 offsetof(struct __sk_buff, cb[0]) + 1),
1035 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1036 offsetof(struct __sk_buff, cb[0]) + 2),
1037 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1038 offsetof(struct __sk_buff, cb[0]) + 3),
1039 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1040 offsetof(struct __sk_buff, cb[1])),
1041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, cb[1]) + 1),
1043 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1044 offsetof(struct __sk_buff, cb[1]) + 2),
1045 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1046 offsetof(struct __sk_buff, cb[1]) + 3),
1047 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1048 offsetof(struct __sk_buff, cb[2])),
1049 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1050 offsetof(struct __sk_buff, cb[2]) + 1),
1051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, cb[2]) + 2),
1053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1054 offsetof(struct __sk_buff, cb[2]) + 3),
1055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1056 offsetof(struct __sk_buff, cb[3])),
1057 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1058 offsetof(struct __sk_buff, cb[3]) + 1),
1059 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1060 offsetof(struct __sk_buff, cb[3]) + 2),
1061 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, cb[3]) + 3),
1063 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1064 offsetof(struct __sk_buff, cb[4])),
1065 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1066 offsetof(struct __sk_buff, cb[4]) + 1),
1067 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1068 offsetof(struct __sk_buff, cb[4]) + 2),
1069 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1070 offsetof(struct __sk_buff, cb[4]) + 3),
1071 BPF_EXIT_INSN(),
1072 },
1073 .result = ACCEPT,
1074 },
1075 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001076 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001077 .insns = {
1078 BPF_MOV64_IMM(BPF_REG_0, 0),
1079 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001080 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001081 BPF_EXIT_INSN(),
1082 },
1083 .errstr = "invalid bpf_context access",
1084 .result = REJECT,
1085 },
1086 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001087 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001088 .insns = {
1089 BPF_MOV64_IMM(BPF_REG_0, 0),
1090 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001091 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001092 BPF_EXIT_INSN(),
1093 },
1094 .errstr = "invalid bpf_context access",
1095 .result = REJECT,
1096 },
1097 {
1098 "check cb access: byte, wrong type",
1099 .insns = {
1100 BPF_MOV64_IMM(BPF_REG_0, 0),
1101 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001102 offsetof(struct __sk_buff, cb[0])),
1103 BPF_EXIT_INSN(),
1104 },
1105 .errstr = "invalid bpf_context access",
1106 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001107 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1108 },
1109 {
1110 "check cb access: half",
1111 .insns = {
1112 BPF_MOV64_IMM(BPF_REG_0, 0),
1113 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1114 offsetof(struct __sk_buff, cb[0])),
1115 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1116 offsetof(struct __sk_buff, cb[0]) + 2),
1117 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1118 offsetof(struct __sk_buff, cb[1])),
1119 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1120 offsetof(struct __sk_buff, cb[1]) + 2),
1121 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1122 offsetof(struct __sk_buff, cb[2])),
1123 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1124 offsetof(struct __sk_buff, cb[2]) + 2),
1125 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1126 offsetof(struct __sk_buff, cb[3])),
1127 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1128 offsetof(struct __sk_buff, cb[3]) + 2),
1129 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1130 offsetof(struct __sk_buff, cb[4])),
1131 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1132 offsetof(struct __sk_buff, cb[4]) + 2),
1133 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1134 offsetof(struct __sk_buff, cb[0])),
1135 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1136 offsetof(struct __sk_buff, cb[0]) + 2),
1137 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1138 offsetof(struct __sk_buff, cb[1])),
1139 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1140 offsetof(struct __sk_buff, cb[1]) + 2),
1141 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1142 offsetof(struct __sk_buff, cb[2])),
1143 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1144 offsetof(struct __sk_buff, cb[2]) + 2),
1145 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1146 offsetof(struct __sk_buff, cb[3])),
1147 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1148 offsetof(struct __sk_buff, cb[3]) + 2),
1149 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1150 offsetof(struct __sk_buff, cb[4])),
1151 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1152 offsetof(struct __sk_buff, cb[4]) + 2),
1153 BPF_EXIT_INSN(),
1154 },
1155 .result = ACCEPT,
1156 },
1157 {
1158 "check cb access: half, unaligned",
1159 .insns = {
1160 BPF_MOV64_IMM(BPF_REG_0, 0),
1161 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1162 offsetof(struct __sk_buff, cb[0]) + 1),
1163 BPF_EXIT_INSN(),
1164 },
1165 .errstr = "misaligned access",
1166 .result = REJECT,
1167 },
1168 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001169 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001170 .insns = {
1171 BPF_MOV64_IMM(BPF_REG_0, 0),
1172 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001173 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001174 BPF_EXIT_INSN(),
1175 },
1176 .errstr = "invalid bpf_context access",
1177 .result = REJECT,
1178 },
1179 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001180 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001181 .insns = {
1182 BPF_MOV64_IMM(BPF_REG_0, 0),
1183 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001184 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001185 BPF_EXIT_INSN(),
1186 },
1187 .errstr = "invalid bpf_context access",
1188 .result = REJECT,
1189 },
1190 {
1191 "check cb access: half, wrong type",
1192 .insns = {
1193 BPF_MOV64_IMM(BPF_REG_0, 0),
1194 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1195 offsetof(struct __sk_buff, cb[0])),
1196 BPF_EXIT_INSN(),
1197 },
1198 .errstr = "invalid bpf_context access",
1199 .result = REJECT,
1200 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1201 },
1202 {
1203 "check cb access: word",
1204 .insns = {
1205 BPF_MOV64_IMM(BPF_REG_0, 0),
1206 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1207 offsetof(struct __sk_buff, cb[0])),
1208 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1209 offsetof(struct __sk_buff, cb[1])),
1210 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1211 offsetof(struct __sk_buff, cb[2])),
1212 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1213 offsetof(struct __sk_buff, cb[3])),
1214 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1215 offsetof(struct __sk_buff, cb[4])),
1216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1217 offsetof(struct __sk_buff, cb[0])),
1218 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1219 offsetof(struct __sk_buff, cb[1])),
1220 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1221 offsetof(struct __sk_buff, cb[2])),
1222 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1223 offsetof(struct __sk_buff, cb[3])),
1224 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1225 offsetof(struct __sk_buff, cb[4])),
1226 BPF_EXIT_INSN(),
1227 },
1228 .result = ACCEPT,
1229 },
1230 {
1231 "check cb access: word, unaligned 1",
1232 .insns = {
1233 BPF_MOV64_IMM(BPF_REG_0, 0),
1234 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1235 offsetof(struct __sk_buff, cb[0]) + 2),
1236 BPF_EXIT_INSN(),
1237 },
1238 .errstr = "misaligned access",
1239 .result = REJECT,
1240 },
1241 {
1242 "check cb access: word, unaligned 2",
1243 .insns = {
1244 BPF_MOV64_IMM(BPF_REG_0, 0),
1245 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1246 offsetof(struct __sk_buff, cb[4]) + 1),
1247 BPF_EXIT_INSN(),
1248 },
1249 .errstr = "misaligned access",
1250 .result = REJECT,
1251 },
1252 {
1253 "check cb access: word, unaligned 3",
1254 .insns = {
1255 BPF_MOV64_IMM(BPF_REG_0, 0),
1256 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1257 offsetof(struct __sk_buff, cb[4]) + 2),
1258 BPF_EXIT_INSN(),
1259 },
1260 .errstr = "misaligned access",
1261 .result = REJECT,
1262 },
1263 {
1264 "check cb access: word, unaligned 4",
1265 .insns = {
1266 BPF_MOV64_IMM(BPF_REG_0, 0),
1267 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1268 offsetof(struct __sk_buff, cb[4]) + 3),
1269 BPF_EXIT_INSN(),
1270 },
1271 .errstr = "misaligned access",
1272 .result = REJECT,
1273 },
1274 {
1275 "check cb access: double",
1276 .insns = {
1277 BPF_MOV64_IMM(BPF_REG_0, 0),
1278 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1279 offsetof(struct __sk_buff, cb[0])),
1280 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1281 offsetof(struct __sk_buff, cb[2])),
1282 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1283 offsetof(struct __sk_buff, cb[0])),
1284 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1285 offsetof(struct __sk_buff, cb[2])),
1286 BPF_EXIT_INSN(),
1287 },
1288 .result = ACCEPT,
1289 },
1290 {
1291 "check cb access: double, unaligned 1",
1292 .insns = {
1293 BPF_MOV64_IMM(BPF_REG_0, 0),
1294 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1295 offsetof(struct __sk_buff, cb[1])),
1296 BPF_EXIT_INSN(),
1297 },
1298 .errstr = "misaligned access",
1299 .result = REJECT,
1300 },
1301 {
1302 "check cb access: double, unaligned 2",
1303 .insns = {
1304 BPF_MOV64_IMM(BPF_REG_0, 0),
1305 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1306 offsetof(struct __sk_buff, cb[3])),
1307 BPF_EXIT_INSN(),
1308 },
1309 .errstr = "misaligned access",
1310 .result = REJECT,
1311 },
1312 {
1313 "check cb access: double, oob 1",
1314 .insns = {
1315 BPF_MOV64_IMM(BPF_REG_0, 0),
1316 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1317 offsetof(struct __sk_buff, cb[4])),
1318 BPF_EXIT_INSN(),
1319 },
1320 .errstr = "invalid bpf_context access",
1321 .result = REJECT,
1322 },
1323 {
1324 "check cb access: double, oob 2",
1325 .insns = {
1326 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001327 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1328 offsetof(struct __sk_buff, cb[4])),
1329 BPF_EXIT_INSN(),
1330 },
1331 .errstr = "invalid bpf_context access",
1332 .result = REJECT,
1333 },
1334 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001335 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001336 .insns = {
1337 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07001338 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1339 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001340 BPF_EXIT_INSN(),
1341 },
1342 .errstr = "invalid bpf_context access",
1343 .result = REJECT,
1344 },
1345 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001346 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001347 .insns = {
1348 BPF_MOV64_IMM(BPF_REG_0, 0),
1349 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07001350 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001351 BPF_EXIT_INSN(),
1352 },
1353 .errstr = "invalid bpf_context access",
1354 .result = REJECT,
1355 },
1356 {
1357 "check cb access: double, wrong type",
1358 .insns = {
1359 BPF_MOV64_IMM(BPF_REG_0, 0),
1360 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1361 offsetof(struct __sk_buff, cb[0])),
1362 BPF_EXIT_INSN(),
1363 },
1364 .errstr = "invalid bpf_context access",
1365 .result = REJECT,
1366 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001367 },
1368 {
1369 "check out of range skb->cb access",
1370 .insns = {
1371 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001372 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001373 BPF_EXIT_INSN(),
1374 },
1375 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001376 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001377 .result = REJECT,
1378 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1379 },
1380 {
1381 "write skb fields from socket prog",
1382 .insns = {
1383 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1384 offsetof(struct __sk_buff, cb[4])),
1385 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1386 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1387 offsetof(struct __sk_buff, mark)),
1388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1389 offsetof(struct __sk_buff, tc_index)),
1390 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1391 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1392 offsetof(struct __sk_buff, cb[0])),
1393 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1394 offsetof(struct __sk_buff, cb[2])),
1395 BPF_EXIT_INSN(),
1396 },
1397 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001398 .errstr_unpriv = "R1 leaks addr",
1399 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001400 },
1401 {
1402 "write skb fields from tc_cls_act prog",
1403 .insns = {
1404 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1405 offsetof(struct __sk_buff, cb[0])),
1406 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1407 offsetof(struct __sk_buff, mark)),
1408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, tc_index)),
1410 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1411 offsetof(struct __sk_buff, tc_index)),
1412 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1413 offsetof(struct __sk_buff, cb[3])),
1414 BPF_EXIT_INSN(),
1415 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001416 .errstr_unpriv = "",
1417 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001418 .result = ACCEPT,
1419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1420 },
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001421 {
1422 "PTR_TO_STACK store/load",
1423 .insns = {
1424 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1426 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1427 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1428 BPF_EXIT_INSN(),
1429 },
1430 .result = ACCEPT,
1431 },
1432 {
1433 "PTR_TO_STACK store/load - bad alignment on off",
1434 .insns = {
1435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1437 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1438 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1439 BPF_EXIT_INSN(),
1440 },
1441 .result = REJECT,
1442 .errstr = "misaligned access off -6 size 8",
1443 },
1444 {
1445 "PTR_TO_STACK store/load - bad alignment on reg",
1446 .insns = {
1447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1449 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1450 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1451 BPF_EXIT_INSN(),
1452 },
1453 .result = REJECT,
1454 .errstr = "misaligned access off -2 size 8",
1455 },
1456 {
1457 "PTR_TO_STACK store/load - out of bounds low",
1458 .insns = {
1459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1461 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1462 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1463 BPF_EXIT_INSN(),
1464 },
1465 .result = REJECT,
1466 .errstr = "invalid stack off=-79992 size=8",
1467 },
1468 {
1469 "PTR_TO_STACK store/load - out of bounds high",
1470 .insns = {
1471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1473 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1474 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1475 BPF_EXIT_INSN(),
1476 },
1477 .result = REJECT,
1478 .errstr = "invalid stack off=0 size=8",
1479 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001480 {
1481 "unpriv: return pointer",
1482 .insns = {
1483 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1484 BPF_EXIT_INSN(),
1485 },
1486 .result = ACCEPT,
1487 .result_unpriv = REJECT,
1488 .errstr_unpriv = "R0 leaks addr",
1489 },
1490 {
1491 "unpriv: add const to pointer",
1492 .insns = {
1493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1494 BPF_MOV64_IMM(BPF_REG_0, 0),
1495 BPF_EXIT_INSN(),
1496 },
1497 .result = ACCEPT,
1498 .result_unpriv = REJECT,
1499 .errstr_unpriv = "R1 pointer arithmetic",
1500 },
1501 {
1502 "unpriv: add pointer to pointer",
1503 .insns = {
1504 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1505 BPF_MOV64_IMM(BPF_REG_0, 0),
1506 BPF_EXIT_INSN(),
1507 },
1508 .result = ACCEPT,
1509 .result_unpriv = REJECT,
1510 .errstr_unpriv = "R1 pointer arithmetic",
1511 },
1512 {
1513 "unpriv: neg pointer",
1514 .insns = {
1515 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1516 BPF_MOV64_IMM(BPF_REG_0, 0),
1517 BPF_EXIT_INSN(),
1518 },
1519 .result = ACCEPT,
1520 .result_unpriv = REJECT,
1521 .errstr_unpriv = "R1 pointer arithmetic",
1522 },
1523 {
1524 "unpriv: cmp pointer with const",
1525 .insns = {
1526 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1527 BPF_MOV64_IMM(BPF_REG_0, 0),
1528 BPF_EXIT_INSN(),
1529 },
1530 .result = ACCEPT,
1531 .result_unpriv = REJECT,
1532 .errstr_unpriv = "R1 pointer comparison",
1533 },
1534 {
1535 "unpriv: cmp pointer with pointer",
1536 .insns = {
1537 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1538 BPF_MOV64_IMM(BPF_REG_0, 0),
1539 BPF_EXIT_INSN(),
1540 },
1541 .result = ACCEPT,
1542 .result_unpriv = REJECT,
1543 .errstr_unpriv = "R10 pointer comparison",
1544 },
1545 {
1546 "unpriv: check that printk is disallowed",
1547 .insns = {
1548 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1551 BPF_MOV64_IMM(BPF_REG_2, 8),
1552 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1554 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001555 BPF_MOV64_IMM(BPF_REG_0, 0),
1556 BPF_EXIT_INSN(),
1557 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001558 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001559 .result_unpriv = REJECT,
1560 .result = ACCEPT,
1561 },
1562 {
1563 "unpriv: pass pointer to helper function",
1564 .insns = {
1565 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1568 BPF_LD_MAP_FD(BPF_REG_1, 0),
1569 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1570 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1572 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001573 BPF_MOV64_IMM(BPF_REG_0, 0),
1574 BPF_EXIT_INSN(),
1575 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001576 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001577 .errstr_unpriv = "R4 leaks addr",
1578 .result_unpriv = REJECT,
1579 .result = ACCEPT,
1580 },
1581 {
1582 "unpriv: indirectly pass pointer on stack to helper function",
1583 .insns = {
1584 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1585 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1587 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001588 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1589 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001590 BPF_MOV64_IMM(BPF_REG_0, 0),
1591 BPF_EXIT_INSN(),
1592 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001593 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001594 .errstr = "invalid indirect read from stack off -8+0 size 8",
1595 .result = REJECT,
1596 },
1597 {
1598 "unpriv: mangle pointer on stack 1",
1599 .insns = {
1600 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1601 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1602 BPF_MOV64_IMM(BPF_REG_0, 0),
1603 BPF_EXIT_INSN(),
1604 },
1605 .errstr_unpriv = "attempt to corrupt spilled",
1606 .result_unpriv = REJECT,
1607 .result = ACCEPT,
1608 },
1609 {
1610 "unpriv: mangle pointer on stack 2",
1611 .insns = {
1612 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1613 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1614 BPF_MOV64_IMM(BPF_REG_0, 0),
1615 BPF_EXIT_INSN(),
1616 },
1617 .errstr_unpriv = "attempt to corrupt spilled",
1618 .result_unpriv = REJECT,
1619 .result = ACCEPT,
1620 },
1621 {
1622 "unpriv: read pointer from stack in small chunks",
1623 .insns = {
1624 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1625 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1626 BPF_MOV64_IMM(BPF_REG_0, 0),
1627 BPF_EXIT_INSN(),
1628 },
1629 .errstr = "invalid size",
1630 .result = REJECT,
1631 },
1632 {
1633 "unpriv: write pointer into ctx",
1634 .insns = {
1635 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1636 BPF_MOV64_IMM(BPF_REG_0, 0),
1637 BPF_EXIT_INSN(),
1638 },
1639 .errstr_unpriv = "R1 leaks addr",
1640 .result_unpriv = REJECT,
1641 .errstr = "invalid bpf_context access",
1642 .result = REJECT,
1643 },
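	/*
	 * Spill/fill tests: the verifier tracks the type of a register
	 * spilled to the stack, so filling it back is expected to restore
	 * a ctx or map pointer with its original type, while mixing
	 * pointer and scalar writes into the same slot demotes it to an
	 * unknown value that can no longer be passed where a ctx pointer
	 * is required.
	 */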
1644 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001645 "unpriv: spill/fill of ctx",
1646 .insns = {
1647 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1649 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1650 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1651 BPF_MOV64_IMM(BPF_REG_0, 0),
1652 BPF_EXIT_INSN(),
1653 },
1654 .result = ACCEPT,
1655 },
1656 {
1657 "unpriv: spill/fill of ctx 2",
1658 .insns = {
1659 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1661 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1662 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1664 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001665 BPF_EXIT_INSN(),
1666 },
1667 .result = ACCEPT,
1668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1669 },
1670 {
1671 "unpriv: spill/fill of ctx 3",
1672 .insns = {
1673 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1675 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1676 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1677 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1679 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001680 BPF_EXIT_INSN(),
1681 },
1682 .result = REJECT,
1683 .errstr = "R1 type=fp expected=ctx",
1684 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1685 },
1686 {
1687 "unpriv: spill/fill of ctx 4",
1688 .insns = {
1689 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1691 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1692 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001693 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1694 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001695 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001696 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1697 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001698 BPF_EXIT_INSN(),
1699 },
1700 .result = REJECT,
1701 .errstr = "R1 type=inv expected=ctx",
1702 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1703 },
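	/*
	 * If two program paths spill different pointer types (stack vs.
	 * ctx, or stack vs. packet) into the same slot, a later load or
	 * store through the filled register cannot be handled, since one
	 * instruction would need two different rewrites; that is what the
	 * "same insn cannot be used with different pointers" cases below
	 * exercise.
	 */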
1704 {
1705 "unpriv: spill/fill of different pointers stx",
1706 .insns = {
1707 BPF_MOV64_IMM(BPF_REG_3, 42),
1708 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1711 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1713 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1714 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1715 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1716 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1717 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1718 offsetof(struct __sk_buff, mark)),
1719 BPF_MOV64_IMM(BPF_REG_0, 0),
1720 BPF_EXIT_INSN(),
1721 },
1722 .result = REJECT,
1723 .errstr = "same insn cannot be used with different pointers",
1724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1725 },
1726 {
1727 "unpriv: spill/fill of different pointers ldx",
1728 .insns = {
1729 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1731 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1734 -(__s32)offsetof(struct bpf_perf_event_data,
1735 sample_period) - 8),
1736 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1737 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1738 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1739 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1740 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1741 offsetof(struct bpf_perf_event_data,
1742 sample_period)),
1743 BPF_MOV64_IMM(BPF_REG_0, 0),
1744 BPF_EXIT_INSN(),
1745 },
1746 .result = REJECT,
1747 .errstr = "same insn cannot be used with different pointers",
1748 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1749 },
1750 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001751 "unpriv: write pointer into map elem value",
1752 .insns = {
1753 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1756 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001757 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1758 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1760 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1761 BPF_EXIT_INSN(),
1762 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001763 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001764 .errstr_unpriv = "R0 leaks addr",
1765 .result_unpriv = REJECT,
1766 .result = ACCEPT,
1767 },
1768 {
1769 "unpriv: partial copy of pointer",
1770 .insns = {
1771 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1772 BPF_MOV64_IMM(BPF_REG_0, 0),
1773 BPF_EXIT_INSN(),
1774 },
1775 .errstr_unpriv = "R10 partial copy",
1776 .result_unpriv = REJECT,
1777 .result = ACCEPT,
1778 },
1779 {
1780 "unpriv: pass pointer to tail_call",
1781 .insns = {
1782 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1783 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001784 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1785 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001786 BPF_MOV64_IMM(BPF_REG_0, 0),
1787 BPF_EXIT_INSN(),
1788 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001789 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001790 .errstr_unpriv = "R3 leaks addr into helper",
1791 .result_unpriv = REJECT,
1792 .result = ACCEPT,
1793 },
1794 {
1795 "unpriv: cmp map pointer with zero",
1796 .insns = {
1797 BPF_MOV64_IMM(BPF_REG_1, 0),
1798 BPF_LD_MAP_FD(BPF_REG_1, 0),
1799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1800 BPF_MOV64_IMM(BPF_REG_0, 0),
1801 BPF_EXIT_INSN(),
1802 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001803 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001804 .errstr_unpriv = "R1 pointer comparison",
1805 .result_unpriv = REJECT,
1806 .result = ACCEPT,
1807 },
1808 {
1809 "unpriv: write into frame pointer",
1810 .insns = {
1811 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1812 BPF_MOV64_IMM(BPF_REG_0, 0),
1813 BPF_EXIT_INSN(),
1814 },
1815 .errstr = "frame pointer is read only",
1816 .result = REJECT,
1817 },
1818 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001819 "unpriv: spill/fill frame pointer",
1820 .insns = {
1821 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1823 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1824 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1825 BPF_MOV64_IMM(BPF_REG_0, 0),
1826 BPF_EXIT_INSN(),
1827 },
1828 .errstr = "frame pointer is read only",
1829 .result = REJECT,
1830 },
1831 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001832 "unpriv: cmp of frame pointer",
1833 .insns = {
1834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1835 BPF_MOV64_IMM(BPF_REG_0, 0),
1836 BPF_EXIT_INSN(),
1837 },
1838 .errstr_unpriv = "R10 pointer comparison",
1839 .result_unpriv = REJECT,
1840 .result = ACCEPT,
1841 },
1842 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02001843 "unpriv: adding of fp",
1844 .insns = {
1845 BPF_MOV64_IMM(BPF_REG_0, 0),
1846 BPF_MOV64_IMM(BPF_REG_1, 0),
1847 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1848 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
1849 BPF_EXIT_INSN(),
1850 },
1851 .errstr_unpriv = "pointer arithmetic prohibited",
1852 .result_unpriv = REJECT,
1853 .errstr = "R1 invalid mem access",
1854 .result = REJECT,
1855 },
1856 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001857 "unpriv: cmp of stack pointer",
1858 .insns = {
1859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1862 BPF_MOV64_IMM(BPF_REG_0, 0),
1863 BPF_EXIT_INSN(),
1864 },
1865 .errstr_unpriv = "R2 pointer comparison",
1866 .result_unpriv = REJECT,
1867 .result = ACCEPT,
1868 },
1869 {
Yonghong Song332270f2017-04-29 22:52:42 -07001870 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001871 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07001872 BPF_MOV64_IMM(BPF_REG_1, 4),
1873 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1874 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
1875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1877 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1878 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
1879 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1880 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1882 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001883 BPF_MOV64_IMM(BPF_REG_0, 0),
1884 BPF_EXIT_INSN(),
1885 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001886 .result = ACCEPT,
1887 },
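	/*
	 * raw_stack tests: bpf_skb_load_bytes() writes into a buffer on
	 * the program's own stack. The verifier has to check that the
	 * buffer and length fit within the stack (the off=-513 and
	 * off=-512 cases below bracket that limit) and that reads after
	 * the call only see slots the helper was allowed to initialize.
	 */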
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001888 {
1889 "raw_stack: no skb_load_bytes",
1890 .insns = {
1891 BPF_MOV64_IMM(BPF_REG_2, 4),
1892 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1894 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1895 BPF_MOV64_IMM(BPF_REG_4, 8),
1896 /* Call to skb_load_bytes() omitted. */
1897 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1898 BPF_EXIT_INSN(),
1899 },
1900 .result = REJECT,
1901 .errstr = "invalid read from stack off -8+0 size 8",
1902 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1903 },
1904 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001905 "raw_stack: skb_load_bytes, negative len",
1906 .insns = {
1907 BPF_MOV64_IMM(BPF_REG_2, 4),
1908 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1910 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1911 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1913 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001914 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1915 BPF_EXIT_INSN(),
1916 },
1917 .result = REJECT,
1918 .errstr = "invalid stack type R3",
1919 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1920 },
1921 {
1922 "raw_stack: skb_load_bytes, negative len 2",
1923 .insns = {
1924 BPF_MOV64_IMM(BPF_REG_2, 4),
1925 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1927 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1928 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1930 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001931 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1932 BPF_EXIT_INSN(),
1933 },
1934 .result = REJECT,
1935 .errstr = "invalid stack type R3",
1936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1937 },
1938 {
1939 "raw_stack: skb_load_bytes, zero len",
1940 .insns = {
1941 BPF_MOV64_IMM(BPF_REG_2, 4),
1942 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1944 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1945 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1947 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001948 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1949 BPF_EXIT_INSN(),
1950 },
1951 .result = REJECT,
1952 .errstr = "invalid stack type R3",
1953 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1954 },
1955 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001956 "raw_stack: skb_load_bytes, no init",
1957 .insns = {
1958 BPF_MOV64_IMM(BPF_REG_2, 4),
1959 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1961 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1962 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1964 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001965 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1966 BPF_EXIT_INSN(),
1967 },
1968 .result = ACCEPT,
1969 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1970 },
1971 {
1972 "raw_stack: skb_load_bytes, init",
1973 .insns = {
1974 BPF_MOV64_IMM(BPF_REG_2, 4),
1975 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1977 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1978 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1979 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1981 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001982 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1983 BPF_EXIT_INSN(),
1984 },
1985 .result = ACCEPT,
1986 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1987 },
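	/*
	 * The next cases check that a helper writing into a raw stack
	 * buffer only clobbers the slots inside that buffer: spilled
	 * pointers just outside the range should stay usable afterwards,
	 * while anything inside the written range becomes an unknown
	 * value ('inv') and dereferencing it must be rejected.
	 */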
1988 {
1989 "raw_stack: skb_load_bytes, spilled regs around bounds",
1990 .insns = {
1991 BPF_MOV64_IMM(BPF_REG_2, 4),
1992 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001994 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1995 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001996 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1997 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001998 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1999 BPF_FUNC_skb_load_bytes),
2000 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2001 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002002 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2003 offsetof(struct __sk_buff, mark)),
2004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2005 offsetof(struct __sk_buff, priority)),
2006 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2007 BPF_EXIT_INSN(),
2008 },
2009 .result = ACCEPT,
2010 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2011 },
2012 {
2013 "raw_stack: skb_load_bytes, spilled regs corruption",
2014 .insns = {
2015 BPF_MOV64_IMM(BPF_REG_2, 4),
2016 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002018 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002019 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2020 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002021 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2022 BPF_FUNC_skb_load_bytes),
2023 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002024 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2025 offsetof(struct __sk_buff, mark)),
2026 BPF_EXIT_INSN(),
2027 },
2028 .result = REJECT,
2029 .errstr = "R0 invalid mem access 'inv'",
2030 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2031 },
2032 {
2033 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2034 .insns = {
2035 BPF_MOV64_IMM(BPF_REG_2, 4),
2036 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002038 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2039 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2040 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002041 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2042 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002043 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2044 BPF_FUNC_skb_load_bytes),
2045 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2046 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2047 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002048 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2049 offsetof(struct __sk_buff, mark)),
2050 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2051 offsetof(struct __sk_buff, priority)),
2052 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2053 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2054 offsetof(struct __sk_buff, pkt_type)),
2055 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2056 BPF_EXIT_INSN(),
2057 },
2058 .result = REJECT,
2059 .errstr = "R3 invalid mem access 'inv'",
2060 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2061 },
2062 {
2063 "raw_stack: skb_load_bytes, spilled regs + data",
2064 .insns = {
2065 BPF_MOV64_IMM(BPF_REG_2, 4),
2066 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002068 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2069 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2070 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002071 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2072 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002073 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2074 BPF_FUNC_skb_load_bytes),
2075 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2076 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2077 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002078 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2079 offsetof(struct __sk_buff, mark)),
2080 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2081 offsetof(struct __sk_buff, priority)),
2082 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2083 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2084 BPF_EXIT_INSN(),
2085 },
2086 .result = ACCEPT,
2087 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2088 },
2089 {
2090 "raw_stack: skb_load_bytes, invalid access 1",
2091 .insns = {
2092 BPF_MOV64_IMM(BPF_REG_2, 4),
2093 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2095 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2096 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2098 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002099 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2100 BPF_EXIT_INSN(),
2101 },
2102 .result = REJECT,
2103 .errstr = "invalid stack type R3 off=-513 access_size=8",
2104 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2105 },
2106 {
2107 "raw_stack: skb_load_bytes, invalid access 2",
2108 .insns = {
2109 BPF_MOV64_IMM(BPF_REG_2, 4),
2110 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2112 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2113 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002114 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2115 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002116 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2117 BPF_EXIT_INSN(),
2118 },
2119 .result = REJECT,
2120 .errstr = "invalid stack type R3 off=-1 access_size=8",
2121 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2122 },
2123 {
2124 "raw_stack: skb_load_bytes, invalid access 3",
2125 .insns = {
2126 BPF_MOV64_IMM(BPF_REG_2, 4),
2127 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2129 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2130 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2132 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002133 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2134 BPF_EXIT_INSN(),
2135 },
2136 .result = REJECT,
2137 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2138 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2139 },
2140 {
2141 "raw_stack: skb_load_bytes, invalid access 4",
2142 .insns = {
2143 BPF_MOV64_IMM(BPF_REG_2, 4),
2144 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2146 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2147 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2149 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2151 BPF_EXIT_INSN(),
2152 },
2153 .result = REJECT,
2154 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2155 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2156 },
2157 {
2158 "raw_stack: skb_load_bytes, invalid access 5",
2159 .insns = {
2160 BPF_MOV64_IMM(BPF_REG_2, 4),
2161 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2163 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2164 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2166 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002167 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2168 BPF_EXIT_INSN(),
2169 },
2170 .result = REJECT,
2171 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2172 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2173 },
2174 {
2175 "raw_stack: skb_load_bytes, invalid access 6",
2176 .insns = {
2177 BPF_MOV64_IMM(BPF_REG_2, 4),
2178 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2181 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2183 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002184 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2185 BPF_EXIT_INSN(),
2186 },
2187 .result = REJECT,
2188 .errstr = "invalid stack type R3 off=-512 access_size=0",
2189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2190 },
2191 {
2192 "raw_stack: skb_load_bytes, large access",
2193 .insns = {
2194 BPF_MOV64_IMM(BPF_REG_2, 4),
2195 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2197 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2198 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2200 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2202 BPF_EXIT_INSN(),
2203 },
2204 .result = ACCEPT,
2205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2206 },
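	/*
	 * Direct packet access tests. Roughly, the pattern being verified
	 * corresponds to the following restricted-C idiom (a sketch, not
	 * code from this file); the comparison against data_end is what
	 * makes the later load or store valid:
	 *
	 *	void *data     = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	return *(u8 *)data;
	 */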
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002207 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002208 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002209 .insns = {
2210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2211 offsetof(struct __sk_buff, data)),
2212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2213 offsetof(struct __sk_buff, data_end)),
2214 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2216 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2217 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2218 BPF_MOV64_IMM(BPF_REG_0, 0),
2219 BPF_EXIT_INSN(),
2220 },
2221 .result = ACCEPT,
2222 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2223 },
2224 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002225 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002226 .insns = {
2227 BPF_MOV64_IMM(BPF_REG_0, 1),
2228 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2229 offsetof(struct __sk_buff, data_end)),
2230 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2231 offsetof(struct __sk_buff, data)),
2232 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2234 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2235 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2236 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2237 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2238 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2239 offsetof(struct __sk_buff, data)),
2240 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2242 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2243 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2244 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2247 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2248 offsetof(struct __sk_buff, data_end)),
2249 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2250 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2251 BPF_MOV64_IMM(BPF_REG_0, 0),
2252 BPF_EXIT_INSN(),
2253 },
2254 .result = ACCEPT,
2255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2256 },
2257 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002258 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002259 .insns = {
2260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2261 offsetof(struct __sk_buff, data)),
2262 BPF_MOV64_IMM(BPF_REG_0, 0),
2263 BPF_EXIT_INSN(),
2264 },
2265 .errstr = "invalid bpf_context access off=76",
2266 .result = REJECT,
2267 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2268 },
2269 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002270 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002271 .insns = {
2272 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2273 offsetof(struct __sk_buff, data)),
2274 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2275 offsetof(struct __sk_buff, data_end)),
2276 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2278 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2279 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2280 BPF_MOV64_IMM(BPF_REG_0, 0),
2281 BPF_EXIT_INSN(),
2282 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002283 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002284 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2285 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002286 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002287 "direct packet access: test5 (pkt_end >= reg, good access)",
2288 .insns = {
2289 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2290 offsetof(struct __sk_buff, data)),
2291 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2292 offsetof(struct __sk_buff, data_end)),
2293 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2295 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2296 BPF_MOV64_IMM(BPF_REG_0, 1),
2297 BPF_EXIT_INSN(),
2298 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2299 BPF_MOV64_IMM(BPF_REG_0, 0),
2300 BPF_EXIT_INSN(),
2301 },
2302 .result = ACCEPT,
2303 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2304 },
2305 {
2306 "direct packet access: test6 (pkt_end >= reg, bad access)",
2307 .insns = {
2308 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2309 offsetof(struct __sk_buff, data)),
2310 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2311 offsetof(struct __sk_buff, data_end)),
2312 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2314 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2315 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2316 BPF_MOV64_IMM(BPF_REG_0, 1),
2317 BPF_EXIT_INSN(),
2318 BPF_MOV64_IMM(BPF_REG_0, 0),
2319 BPF_EXIT_INSN(),
2320 },
2321 .errstr = "invalid access to packet",
2322 .result = REJECT,
2323 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2324 },
2325 {
2326 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2327 .insns = {
2328 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2329 offsetof(struct __sk_buff, data)),
2330 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2331 offsetof(struct __sk_buff, data_end)),
2332 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2334 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2335 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2336 BPF_MOV64_IMM(BPF_REG_0, 1),
2337 BPF_EXIT_INSN(),
2338 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2339 BPF_MOV64_IMM(BPF_REG_0, 0),
2340 BPF_EXIT_INSN(),
2341 },
2342 .errstr = "invalid access to packet",
2343 .result = REJECT,
2344 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2345 },
2346 {
2347 "direct packet access: test8 (double test, variant 1)",
2348 .insns = {
2349 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2350 offsetof(struct __sk_buff, data)),
2351 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2352 offsetof(struct __sk_buff, data_end)),
2353 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2355 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2356 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2357 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2358 BPF_MOV64_IMM(BPF_REG_0, 1),
2359 BPF_EXIT_INSN(),
2360 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2361 BPF_MOV64_IMM(BPF_REG_0, 0),
2362 BPF_EXIT_INSN(),
2363 },
2364 .result = ACCEPT,
2365 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2366 },
2367 {
2368 "direct packet access: test9 (double test, variant 2)",
2369 .insns = {
2370 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2371 offsetof(struct __sk_buff, data)),
2372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2373 offsetof(struct __sk_buff, data_end)),
2374 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2376 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2377 BPF_MOV64_IMM(BPF_REG_0, 1),
2378 BPF_EXIT_INSN(),
2379 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2380 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2381 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2382 BPF_MOV64_IMM(BPF_REG_0, 0),
2383 BPF_EXIT_INSN(),
2384 },
2385 .result = ACCEPT,
2386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2387 },
2388 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002389 "direct packet access: test10 (write invalid)",
2390 .insns = {
2391 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2392 offsetof(struct __sk_buff, data)),
2393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2394 offsetof(struct __sk_buff, data_end)),
2395 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2397 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2398 BPF_MOV64_IMM(BPF_REG_0, 0),
2399 BPF_EXIT_INSN(),
2400 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2401 BPF_MOV64_IMM(BPF_REG_0, 0),
2402 BPF_EXIT_INSN(),
2403 },
2404 .errstr = "invalid access to packet",
2405 .result = REJECT,
2406 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2407 },
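	/*
	 * test11-test14 fold constants through shifts, masks and branches
	 * into an offset added to an already range-checked packet pointer.
	 * The verifier is expected to track these as bounded known values,
	 * so the arithmetic (and the byte load in test14) stays inside the
	 * range proven against data_end.
	 */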
2408 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002409 "direct packet access: test11 (shift, good access)",
2410 .insns = {
2411 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2412 offsetof(struct __sk_buff, data)),
2413 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2414 offsetof(struct __sk_buff, data_end)),
2415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2417 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2418 BPF_MOV64_IMM(BPF_REG_3, 144),
2419 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2421 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2422 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2423 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2424 BPF_MOV64_IMM(BPF_REG_0, 1),
2425 BPF_EXIT_INSN(),
2426 BPF_MOV64_IMM(BPF_REG_0, 0),
2427 BPF_EXIT_INSN(),
2428 },
2429 .result = ACCEPT,
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2431 },
2432 {
2433 "direct packet access: test12 (and, good access)",
2434 .insns = {
2435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2436 offsetof(struct __sk_buff, data)),
2437 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2438 offsetof(struct __sk_buff, data_end)),
2439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2441 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2442 BPF_MOV64_IMM(BPF_REG_3, 144),
2443 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2445 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2446 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2447 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2448 BPF_MOV64_IMM(BPF_REG_0, 1),
2449 BPF_EXIT_INSN(),
2450 BPF_MOV64_IMM(BPF_REG_0, 0),
2451 BPF_EXIT_INSN(),
2452 },
2453 .result = ACCEPT,
2454 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2455 },
2456 {
2457 "direct packet access: test13 (branches, good access)",
2458 .insns = {
2459 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2460 offsetof(struct __sk_buff, data)),
2461 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2462 offsetof(struct __sk_buff, data_end)),
2463 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2465 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2466 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2467 offsetof(struct __sk_buff, mark)),
2468 BPF_MOV64_IMM(BPF_REG_4, 1),
2469 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2470 BPF_MOV64_IMM(BPF_REG_3, 14),
2471 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2472 BPF_MOV64_IMM(BPF_REG_3, 24),
2473 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2475 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2476 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2477 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2478 BPF_MOV64_IMM(BPF_REG_0, 1),
2479 BPF_EXIT_INSN(),
2480 BPF_MOV64_IMM(BPF_REG_0, 0),
2481 BPF_EXIT_INSN(),
2482 },
2483 .result = ACCEPT,
2484 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2485 },
2486 {
William Tu63dfef72017-02-04 08:37:29 -08002487 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2488 .insns = {
2489 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2490 offsetof(struct __sk_buff, data)),
2491 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2492 offsetof(struct __sk_buff, data_end)),
2493 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2495 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2496 BPF_MOV64_IMM(BPF_REG_5, 12),
2497 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2498 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2499 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2500 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2501 BPF_MOV64_IMM(BPF_REG_0, 1),
2502 BPF_EXIT_INSN(),
2503 BPF_MOV64_IMM(BPF_REG_0, 0),
2504 BPF_EXIT_INSN(),
2505 },
2506 .result = ACCEPT,
2507 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2508 },
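	/*
	 * test15: an XADD on the stack slot holding a spilled packet
	 * pointer should turn that slot into an unknown value, so the
	 * following fill and store through R2 has to be rejected with
	 * "invalid mem access 'inv'".
	 */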
2509 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002510 "direct packet access: test15 (spill with xadd)",
2511 .insns = {
2512 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2513 offsetof(struct __sk_buff, data)),
2514 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2515 offsetof(struct __sk_buff, data_end)),
2516 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2518 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2519 BPF_MOV64_IMM(BPF_REG_5, 4096),
2520 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2522 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2523 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2524 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2525 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2526 BPF_MOV64_IMM(BPF_REG_0, 0),
2527 BPF_EXIT_INSN(),
2528 },
2529 .errstr = "R2 invalid mem access 'inv'",
2530 .result = REJECT,
2531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2532 },
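	/*
	 * test16/test17: moving data_end forward must not widen the
	 * provable packet range, and with F_LOAD_WITH_STRICT_ALIGNMENT
	 * the misaligned store in test17 should still be reported even
	 * when state pruning is possible on the backward jump.
	 */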
2533 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002534 "direct packet access: test16 (arith on data_end)",
2535 .insns = {
2536 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2537 offsetof(struct __sk_buff, data)),
2538 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2539 offsetof(struct __sk_buff, data_end)),
2540 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2543 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2544 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2545 BPF_MOV64_IMM(BPF_REG_0, 0),
2546 BPF_EXIT_INSN(),
2547 },
2548 .errstr = "invalid access to packet",
2549 .result = REJECT,
2550 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2551 },
2552 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002553 "direct packet access: test17 (pruning, alignment)",
2554 .insns = {
2555 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2556 offsetof(struct __sk_buff, data)),
2557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2558 offsetof(struct __sk_buff, data_end)),
2559 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2560 offsetof(struct __sk_buff, mark)),
2561 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2563 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2564 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2565 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2566 BPF_MOV64_IMM(BPF_REG_0, 0),
2567 BPF_EXIT_INSN(),
2568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2569 BPF_JMP_A(-6),
2570 },
2571 .errstr = "misaligned packet access off 2+15+-4 size 4",
2572 .result = REJECT,
2573 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2574 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2575 },
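	/*
	 * "helper access to packet" tests: a packet pointer may be passed
	 * as a helper's memory argument only after the program has proven,
	 * by comparing against data_end, that the range the helper will
	 * touch lies inside the packet; unchecked or too-short ranges are
	 * expected to fail with "invalid access to packet".
	 */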
2576 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002577 "helper access to packet: test1, valid packet_ptr range",
2578 .insns = {
2579 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2580 offsetof(struct xdp_md, data)),
2581 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2582 offsetof(struct xdp_md, data_end)),
2583 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2585 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2586 BPF_LD_MAP_FD(BPF_REG_1, 0),
2587 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2588 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002589 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2590 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002591 BPF_MOV64_IMM(BPF_REG_0, 0),
2592 BPF_EXIT_INSN(),
2593 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002594 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002595 .result_unpriv = ACCEPT,
2596 .result = ACCEPT,
2597 .prog_type = BPF_PROG_TYPE_XDP,
2598 },
2599 {
2600 "helper access to packet: test2, unchecked packet_ptr",
2601 .insns = {
2602 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2603 offsetof(struct xdp_md, data)),
2604 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002605 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2606 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002607 BPF_MOV64_IMM(BPF_REG_0, 0),
2608 BPF_EXIT_INSN(),
2609 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002610 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002611 .result = REJECT,
2612 .errstr = "invalid access to packet",
2613 .prog_type = BPF_PROG_TYPE_XDP,
2614 },
2615 {
2616 "helper access to packet: test3, variable add",
2617 .insns = {
2618 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2619 offsetof(struct xdp_md, data)),
2620 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2621 offsetof(struct xdp_md, data_end)),
2622 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2624 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2625 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2626 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2627 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2628 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2630 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2631 BPF_LD_MAP_FD(BPF_REG_1, 0),
2632 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2634 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002635 BPF_MOV64_IMM(BPF_REG_0, 0),
2636 BPF_EXIT_INSN(),
2637 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002638 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002639 .result = ACCEPT,
2640 .prog_type = BPF_PROG_TYPE_XDP,
2641 },
2642 {
2643 "helper access to packet: test4, packet_ptr with bad range",
2644 .insns = {
2645 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2646 offsetof(struct xdp_md, data)),
2647 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2648 offsetof(struct xdp_md, data_end)),
2649 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2651 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2652 BPF_MOV64_IMM(BPF_REG_0, 0),
2653 BPF_EXIT_INSN(),
2654 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2656 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002657 BPF_MOV64_IMM(BPF_REG_0, 0),
2658 BPF_EXIT_INSN(),
2659 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002660 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002661 .result = REJECT,
2662 .errstr = "invalid access to packet",
2663 .prog_type = BPF_PROG_TYPE_XDP,
2664 },
2665 {
2666 "helper access to packet: test5, packet_ptr with too short range",
2667 .insns = {
2668 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2669 offsetof(struct xdp_md, data)),
2670 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2671 offsetof(struct xdp_md, data_end)),
2672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2673 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2675 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2676 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2678 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002679 BPF_MOV64_IMM(BPF_REG_0, 0),
2680 BPF_EXIT_INSN(),
2681 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002682 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002683 .result = REJECT,
2684 .errstr = "invalid access to packet",
2685 .prog_type = BPF_PROG_TYPE_XDP,
2686 },
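	/*
	 * test6-test10 repeat the same checks for BPF_PROG_TYPE_SCHED_CLS,
	 * where data and data_end come from struct __sk_buff rather than
	 * struct xdp_md.
	 */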
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002687 {
2688 "helper access to packet: test6, cls valid packet_ptr range",
2689 .insns = {
2690 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2691 offsetof(struct __sk_buff, data)),
2692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2693 offsetof(struct __sk_buff, data_end)),
2694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2696 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2697 BPF_LD_MAP_FD(BPF_REG_1, 0),
2698 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2699 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2701 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002702 BPF_MOV64_IMM(BPF_REG_0, 0),
2703 BPF_EXIT_INSN(),
2704 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002705 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002706 .result = ACCEPT,
2707 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2708 },
2709 {
2710 "helper access to packet: test7, cls unchecked packet_ptr",
2711 .insns = {
2712 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2713 offsetof(struct __sk_buff, data)),
2714 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2716 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002717 BPF_MOV64_IMM(BPF_REG_0, 0),
2718 BPF_EXIT_INSN(),
2719 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002720 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002721 .result = REJECT,
2722 .errstr = "invalid access to packet",
2723 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2724 },
2725 {
2726 "helper access to packet: test8, cls variable add",
2727 .insns = {
2728 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2729 offsetof(struct __sk_buff, data)),
2730 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2731 offsetof(struct __sk_buff, data_end)),
2732 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2734 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2735 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2736 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2737 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2738 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2740 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2741 BPF_LD_MAP_FD(BPF_REG_1, 0),
2742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2744 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002745 BPF_MOV64_IMM(BPF_REG_0, 0),
2746 BPF_EXIT_INSN(),
2747 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002748 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002749 .result = ACCEPT,
2750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2751 },
2752 {
2753 "helper access to packet: test9, cls packet_ptr with bad range",
2754 .insns = {
2755 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2756 offsetof(struct __sk_buff, data)),
2757 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2758 offsetof(struct __sk_buff, data_end)),
2759 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2761 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2762 BPF_MOV64_IMM(BPF_REG_0, 0),
2763 BPF_EXIT_INSN(),
2764 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002765 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2766 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002767 BPF_MOV64_IMM(BPF_REG_0, 0),
2768 BPF_EXIT_INSN(),
2769 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002770 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002771 .result = REJECT,
2772 .errstr = "invalid access to packet",
2773 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2774 },
2775 {
2776 "helper access to packet: test10, cls packet_ptr with too short range",
2777 .insns = {
2778 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2779 offsetof(struct __sk_buff, data)),
2780 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2781 offsetof(struct __sk_buff, data_end)),
2782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2783 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2785 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2786 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2788 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002789 BPF_MOV64_IMM(BPF_REG_0, 0),
2790 BPF_EXIT_INSN(),
2791 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002792 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002793 .result = REJECT,
2794 .errstr = "invalid access to packet",
2795 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2796 },
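	/*
	 * test11/test12: only helpers explicitly allowed to read packet
	 * memory directly may take a packet pointer as a buffer argument;
	 * handing one to skb_store_bytes()/skb_load_bytes() here is
	 * expected to fail with "helper access to the packet". The
	 * bpf_csum_diff() tests that follow use such an allowed helper,
	 * so only the length and range checks matter there.
	 */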
2797 {
2798 "helper access to packet: test11, cls unsuitable helper 1",
2799 .insns = {
2800 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2801 offsetof(struct __sk_buff, data)),
2802 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2803 offsetof(struct __sk_buff, data_end)),
2804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2805 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2807 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2808 BPF_MOV64_IMM(BPF_REG_2, 0),
2809 BPF_MOV64_IMM(BPF_REG_4, 42),
2810 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2812 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002813 BPF_MOV64_IMM(BPF_REG_0, 0),
2814 BPF_EXIT_INSN(),
2815 },
2816 .result = REJECT,
2817 .errstr = "helper access to the packet",
2818 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2819 },
2820 {
2821 "helper access to packet: test12, cls unsuitable helper 2",
2822 .insns = {
2823 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2824 offsetof(struct __sk_buff, data)),
2825 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2826 offsetof(struct __sk_buff, data_end)),
2827 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2829 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2830 BPF_MOV64_IMM(BPF_REG_2, 0),
2831 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2833 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002834 BPF_MOV64_IMM(BPF_REG_0, 0),
2835 BPF_EXIT_INSN(),
2836 },
2837 .result = REJECT,
2838 .errstr = "helper access to the packet",
2839 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2840 },
2841 {
2842 "helper access to packet: test13, cls helper ok",
2843 .insns = {
2844 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2845 offsetof(struct __sk_buff, data)),
2846 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2847 offsetof(struct __sk_buff, data_end)),
2848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2849 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2851 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2852 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2853 BPF_MOV64_IMM(BPF_REG_2, 4),
2854 BPF_MOV64_IMM(BPF_REG_3, 0),
2855 BPF_MOV64_IMM(BPF_REG_4, 0),
2856 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002857 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2858 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002859 BPF_MOV64_IMM(BPF_REG_0, 0),
2860 BPF_EXIT_INSN(),
2861 },
2862 .result = ACCEPT,
2863 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2864 },
2865 {
2866 "helper access to packet: test14, cls helper fail sub",
2867 .insns = {
2868 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2869 offsetof(struct __sk_buff, data)),
2870 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2871 offsetof(struct __sk_buff, data_end)),
2872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2873 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2875 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2876 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2877 BPF_MOV64_IMM(BPF_REG_2, 4),
2878 BPF_MOV64_IMM(BPF_REG_3, 0),
2879 BPF_MOV64_IMM(BPF_REG_4, 0),
2880 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002881 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2882 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002883 BPF_MOV64_IMM(BPF_REG_0, 0),
2884 BPF_EXIT_INSN(),
2885 },
2886 .result = REJECT,
2887 .errstr = "type=inv expected=fp",
2888 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2889 },
2890 {
2891 "helper access to packet: test15, cls helper fail range 1",
2892 .insns = {
2893 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2894 offsetof(struct __sk_buff, data)),
2895 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2896 offsetof(struct __sk_buff, data_end)),
2897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2900 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2902 BPF_MOV64_IMM(BPF_REG_2, 8),
2903 BPF_MOV64_IMM(BPF_REG_3, 0),
2904 BPF_MOV64_IMM(BPF_REG_4, 0),
2905 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002906 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2907 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002908 BPF_MOV64_IMM(BPF_REG_0, 0),
2909 BPF_EXIT_INSN(),
2910 },
2911 .result = REJECT,
2912 .errstr = "invalid access to packet",
2913 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2914 },
2915 {
2916 "helper access to packet: test16, cls helper fail range 2",
2917 .insns = {
2918 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2919 offsetof(struct __sk_buff, data)),
2920 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2921 offsetof(struct __sk_buff, data_end)),
2922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2925 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2927 BPF_MOV64_IMM(BPF_REG_2, -9),
2928 BPF_MOV64_IMM(BPF_REG_3, 0),
2929 BPF_MOV64_IMM(BPF_REG_4, 0),
2930 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2932 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002933 BPF_MOV64_IMM(BPF_REG_0, 0),
2934 BPF_EXIT_INSN(),
2935 },
2936 .result = REJECT,
2937 .errstr = "invalid access to packet",
2938 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2939 },
2940 {
2941 "helper access to packet: test17, cls helper fail range 3",
2942 .insns = {
2943 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2944 offsetof(struct __sk_buff, data)),
2945 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2946 offsetof(struct __sk_buff, data_end)),
2947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2950 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2952 BPF_MOV64_IMM(BPF_REG_2, ~0),
2953 BPF_MOV64_IMM(BPF_REG_3, 0),
2954 BPF_MOV64_IMM(BPF_REG_4, 0),
2955 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2957 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002958 BPF_MOV64_IMM(BPF_REG_0, 0),
2959 BPF_EXIT_INSN(),
2960 },
2961 .result = REJECT,
2962 .errstr = "invalid access to packet",
2963 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2964 },
2965 {
2966 "helper access to packet: test18, cls helper fail range zero",
2967 .insns = {
2968 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2969 offsetof(struct __sk_buff, data)),
2970 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2971 offsetof(struct __sk_buff, data_end)),
2972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2975 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2977 BPF_MOV64_IMM(BPF_REG_2, 0),
2978 BPF_MOV64_IMM(BPF_REG_3, 0),
2979 BPF_MOV64_IMM(BPF_REG_4, 0),
2980 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2982 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002983 BPF_MOV64_IMM(BPF_REG_0, 0),
2984 BPF_EXIT_INSN(),
2985 },
2986 .result = REJECT,
2987 .errstr = "invalid access to packet",
2988 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2989 },
2990 {
2991 "helper access to packet: test19, pkt end as input",
2992 .insns = {
2993 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2994 offsetof(struct __sk_buff, data)),
2995 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2996 offsetof(struct __sk_buff, data_end)),
2997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3000 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3001 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3002 BPF_MOV64_IMM(BPF_REG_2, 4),
3003 BPF_MOV64_IMM(BPF_REG_3, 0),
3004 BPF_MOV64_IMM(BPF_REG_4, 0),
3005 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3007 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003008 BPF_MOV64_IMM(BPF_REG_0, 0),
3009 BPF_EXIT_INSN(),
3010 },
3011 .result = REJECT,
3012 .errstr = "R1 type=pkt_end expected=fp",
3013 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3014 },
3015 {
3016 "helper access to packet: test20, wrong reg",
3017 .insns = {
3018 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3019 offsetof(struct __sk_buff, data)),
3020 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3021 offsetof(struct __sk_buff, data_end)),
3022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3023 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3025 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3026 BPF_MOV64_IMM(BPF_REG_2, 4),
3027 BPF_MOV64_IMM(BPF_REG_3, 0),
3028 BPF_MOV64_IMM(BPF_REG_4, 0),
3029 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003030 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3031 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003032 BPF_MOV64_IMM(BPF_REG_0, 0),
3033 BPF_EXIT_INSN(),
3034 },
3035 .result = REJECT,
3036 .errstr = "invalid access to packet",
3037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3038 },
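	/*
	 * Array map tests: .fixup_map2 presumably refers to an array map
	 * whose value is the struct test_val defined elsewhere in this
	 * file. The point of these cases is whether the verifier can prove
	 * that the register-derived index stays within the value size
	 * before the store at offsetof(struct test_val, foo), and that
	 * unprivileged programs are still denied the pointer arithmetic.
	 */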
Josef Bacik48461132016-09-28 10:54:32 -04003039 {
3040 "valid map access into an array with a constant",
3041 .insns = {
3042 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3043 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3045 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003046 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3047 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003048 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003049 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3050 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003051 BPF_EXIT_INSN(),
3052 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003053 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003054 .errstr_unpriv = "R0 leaks addr",
3055 .result_unpriv = REJECT,
3056 .result = ACCEPT,
3057 },
3058 {
3059 "valid map access into an array with a register",
3060 .insns = {
3061 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3064 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3068 BPF_MOV64_IMM(BPF_REG_1, 4),
3069 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3070 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003071 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3072 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003073 BPF_EXIT_INSN(),
3074 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003075 .fixup_map2 = { 3 },
3076 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003077 .result_unpriv = REJECT,
3078 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003079 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003080 },
3081 {
3082 "valid map access into an array with a variable",
3083 .insns = {
3084 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3085 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3087 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3089 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3091 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3092 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3093 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3094 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003095 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3096 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003097 BPF_EXIT_INSN(),
3098 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003099 .fixup_map2 = { 3 },
3100 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003101 .result_unpriv = REJECT,
3102 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003103 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003104 },
3105 {
3106 "valid map access into an array with a signed variable",
3107 .insns = {
3108 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3109 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3111 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003112 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3113 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3115 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3116 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3117 BPF_MOV32_IMM(BPF_REG_1, 0),
3118 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3119 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3120 BPF_MOV32_IMM(BPF_REG_1, 0),
3121 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3122 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003123 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3124 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003125 BPF_EXIT_INSN(),
3126 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003127 .fixup_map2 = { 3 },
3128 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003129 .result_unpriv = REJECT,
3130 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003131 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003132 },
3133 {
3134 "invalid map access into an array with a constant",
3135 .insns = {
3136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3139 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3141 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3143 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3144 offsetof(struct test_val, foo)),
3145 BPF_EXIT_INSN(),
3146 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003147 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003148 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3149 .result = REJECT,
3150 },
3151 {
3152 "invalid map access into an array with a register",
3153 .insns = {
3154 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3155 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3157 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3159 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003160 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3161 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3162 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3163 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003164 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3165 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003166 BPF_EXIT_INSN(),
3167 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003168 .fixup_map2 = { 3 },
3169 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003170 .errstr = "R0 min value is outside of the array range",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003171 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003172 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003173 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003174 },
3175 {
3176 "invalid map access into an array with a variable",
3177 .insns = {
3178 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3181 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3185 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3186 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3187 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003188 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3189 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003190 BPF_EXIT_INSN(),
3191 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003192 .fixup_map2 = { 3 },
3193 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003194 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003195 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003196 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003197 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003198 },
3199 {
3200 "invalid map access into an array with no floor check",
3201 .insns = {
3202 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3205 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3207 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3209 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3210 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3211 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3212 BPF_MOV32_IMM(BPF_REG_1, 0),
3213 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3214 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003215 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3216 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003217 BPF_EXIT_INSN(),
3218 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003219 .fixup_map2 = { 3 },
3220 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003221 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003222 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003223 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003224 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003225 },
3226 {
3227 "invalid map access into an array with a invalid max check",
3228 .insns = {
3229 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3230 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3232 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3234 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3236 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3237 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3238 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3239 BPF_MOV32_IMM(BPF_REG_1, 0),
3240 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3241 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003242 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3243 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003244 BPF_EXIT_INSN(),
3245 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003246 .fixup_map2 = { 3 },
3247 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003248 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003249 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003250 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003251 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003252 },
3253 {
3254 "invalid map access into an array with a invalid max check",
3255 .insns = {
3256 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3257 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3259 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3261 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003262 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3263 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3264 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3267 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3269 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3271 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003272 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3273 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003274 BPF_EXIT_INSN(),
3275 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003276 .fixup_map2 = { 3, 11 },
3277 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003278 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003279 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003280 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003281 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003282 },
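	/* map_lookup_elem() returns a map value or NULL: registers that share
	 * the result may be dereferenced once the NULL check has been done,
	 * but ALU ops on the still-unchecked pointer, or mixing the results
	 * of different lookups, must be rejected.
	 */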
Thomas Graf57a09bf2016-10-18 19:51:19 +02003283 {
3284 "multiple registers share map_lookup_elem result",
3285 .insns = {
3286 BPF_MOV64_IMM(BPF_REG_1, 10),
3287 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3290 BPF_LD_MAP_FD(BPF_REG_1, 0),
3291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3292 BPF_FUNC_map_lookup_elem),
3293 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3295 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3296 BPF_EXIT_INSN(),
3297 },
3298 .fixup_map1 = { 4 },
3299 .result = ACCEPT,
3300 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3301 },
3302 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003303 "alu ops on ptr_to_map_value_or_null, 1",
3304 .insns = {
3305 BPF_MOV64_IMM(BPF_REG_1, 10),
3306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3309 BPF_LD_MAP_FD(BPF_REG_1, 0),
3310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3311 BPF_FUNC_map_lookup_elem),
3312 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3316 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3317 BPF_EXIT_INSN(),
3318 },
3319 .fixup_map1 = { 4 },
3320 .errstr = "R4 invalid mem access",
3321 .result = REJECT,
3322 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3323 },
3324 {
3325 "alu ops on ptr_to_map_value_or_null, 2",
3326 .insns = {
3327 BPF_MOV64_IMM(BPF_REG_1, 10),
3328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3331 BPF_LD_MAP_FD(BPF_REG_1, 0),
3332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3333 BPF_FUNC_map_lookup_elem),
3334 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3335 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3336 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3337 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3338 BPF_EXIT_INSN(),
3339 },
3340 .fixup_map1 = { 4 },
3341 .errstr = "R4 invalid mem access",
3342 .result = REJECT,
3343 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3344 },
3345 {
3346 "alu ops on ptr_to_map_value_or_null, 3",
3347 .insns = {
3348 BPF_MOV64_IMM(BPF_REG_1, 10),
3349 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3352 BPF_LD_MAP_FD(BPF_REG_1, 0),
3353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3354 BPF_FUNC_map_lookup_elem),
3355 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3356 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3357 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3358 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3359 BPF_EXIT_INSN(),
3360 },
3361 .fixup_map1 = { 4 },
3362 .errstr = "R4 invalid mem access",
3363 .result = REJECT,
3364 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3365 },
3366 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02003367 "invalid memory access with multiple map_lookup_elem calls",
3368 .insns = {
3369 BPF_MOV64_IMM(BPF_REG_1, 10),
3370 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3371 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3373 BPF_LD_MAP_FD(BPF_REG_1, 0),
3374 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3375 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3377 BPF_FUNC_map_lookup_elem),
3378 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3381 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3382 BPF_FUNC_map_lookup_elem),
3383 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3384 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3385 BPF_EXIT_INSN(),
3386 },
3387 .fixup_map1 = { 4 },
3388 .result = REJECT,
3389 .errstr = "R4 !read_ok",
3390 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3391 },
3392 {
3393 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3394 .insns = {
3395 BPF_MOV64_IMM(BPF_REG_1, 10),
3396 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3397 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3399 BPF_LD_MAP_FD(BPF_REG_1, 0),
3400 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3401 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3402 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3403 BPF_FUNC_map_lookup_elem),
3404 BPF_MOV64_IMM(BPF_REG_2, 10),
3405 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3406 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3409 BPF_FUNC_map_lookup_elem),
3410 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3411 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3412 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3413 BPF_EXIT_INSN(),
3414 },
3415 .fixup_map1 = { 4 },
3416 .result = ACCEPT,
3417 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3418 },
Josef Bacike9548902016-11-29 12:35:19 -05003419 {
Daniel Borkmanna08dd0d2016-12-15 01:30:06 +01003420 "multiple registers share map_lookup_elem bad reg type",
3421 .insns = {
3422 BPF_MOV64_IMM(BPF_REG_1, 10),
3423 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3424 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3426 BPF_LD_MAP_FD(BPF_REG_1, 0),
3427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3428 BPF_FUNC_map_lookup_elem),
3429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3430 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3431 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3432 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3434 BPF_MOV64_IMM(BPF_REG_1, 1),
3435 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3436 BPF_MOV64_IMM(BPF_REG_1, 2),
3437 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3438 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3439 BPF_MOV64_IMM(BPF_REG_1, 3),
3440 BPF_EXIT_INSN(),
3441 },
3442 .fixup_map1 = { 4 },
3443 .result = REJECT,
3444 .errstr = "R3 invalid mem access 'inv'",
3445 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3446 },
3447 {
Josef Bacike9548902016-11-29 12:35:19 -05003448 "invalid map access from else condition",
3449 .insns = {
3450 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3453 BPF_LD_MAP_FD(BPF_REG_1, 0),
3454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3455 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3456 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3457 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3459 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3460 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3461 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3462 BPF_EXIT_INSN(),
3463 },
3464 .fixup_map2 = { 3 },
3465 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3466 .result = REJECT,
3467 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3468 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003469 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05003470 },
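	/* OR-ing a known constant into a register must keep it a known
	 * constant, so the resulting probe_read() size can still be checked
	 * against the 48-byte stack buffer passed in R1.
	 */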
Gianluca Borello3c8397442016-12-03 12:31:33 -08003471 {
3472 "constant register |= constant should keep constant type",
3473 .insns = {
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3476 BPF_MOV64_IMM(BPF_REG_2, 34),
3477 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3478 BPF_MOV64_IMM(BPF_REG_3, 0),
3479 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3480 BPF_EXIT_INSN(),
3481 },
3482 .result = ACCEPT,
3483 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3484 },
3485 {
3486 "constant register |= constant should not bypass stack boundary checks",
3487 .insns = {
3488 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3490 BPF_MOV64_IMM(BPF_REG_2, 34),
3491 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3492 BPF_MOV64_IMM(BPF_REG_3, 0),
3493 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3494 BPF_EXIT_INSN(),
3495 },
3496 .errstr = "invalid stack type R1 off=-48 access_size=58",
3497 .result = REJECT,
3498 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3499 },
3500 {
3501 "constant register |= constant register should keep constant type",
3502 .insns = {
3503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3505 BPF_MOV64_IMM(BPF_REG_2, 34),
3506 BPF_MOV64_IMM(BPF_REG_4, 13),
3507 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3508 BPF_MOV64_IMM(BPF_REG_3, 0),
3509 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3510 BPF_EXIT_INSN(),
3511 },
3512 .result = ACCEPT,
3513 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3514 },
3515 {
3516 "constant register |= constant register should not bypass stack boundary checks",
3517 .insns = {
3518 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3520 BPF_MOV64_IMM(BPF_REG_2, 34),
3521 BPF_MOV64_IMM(BPF_REG_4, 24),
3522 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3523 BPF_MOV64_IMM(BPF_REG_3, 0),
3524 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3525 BPF_EXIT_INSN(),
3526 },
3527 .errstr = "invalid stack type R1 off=-48 access_size=58",
3528 .result = REJECT,
3529 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3530 },
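	/* LWT direct packet access: reads are allowed for LWT_IN, LWT_OUT and
	 * LWT_XMIT, but writes into the packet only for LWT_XMIT.
	 */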
Thomas Graf3f731d82016-12-05 10:30:52 +01003531 {
3532 "invalid direct packet write for LWT_IN",
3533 .insns = {
3534 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3535 offsetof(struct __sk_buff, data)),
3536 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3537 offsetof(struct __sk_buff, data_end)),
3538 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3540 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3541 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3542 BPF_MOV64_IMM(BPF_REG_0, 0),
3543 BPF_EXIT_INSN(),
3544 },
3545 .errstr = "cannot write into packet",
3546 .result = REJECT,
3547 .prog_type = BPF_PROG_TYPE_LWT_IN,
3548 },
3549 {
3550 "invalid direct packet write for LWT_OUT",
3551 .insns = {
3552 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3553 offsetof(struct __sk_buff, data)),
3554 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3555 offsetof(struct __sk_buff, data_end)),
3556 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3558 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3559 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3560 BPF_MOV64_IMM(BPF_REG_0, 0),
3561 BPF_EXIT_INSN(),
3562 },
3563 .errstr = "cannot write into packet",
3564 .result = REJECT,
3565 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3566 },
3567 {
3568 "direct packet write for LWT_XMIT",
3569 .insns = {
3570 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3571 offsetof(struct __sk_buff, data)),
3572 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3573 offsetof(struct __sk_buff, data_end)),
3574 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3576 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3577 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3578 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 BPF_EXIT_INSN(),
3580 },
3581 .result = ACCEPT,
3582 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3583 },
3584 {
3585 "direct packet read for LWT_IN",
3586 .insns = {
3587 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3588 offsetof(struct __sk_buff, data)),
3589 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3590 offsetof(struct __sk_buff, data_end)),
3591 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3593 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3594 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3595 BPF_MOV64_IMM(BPF_REG_0, 0),
3596 BPF_EXIT_INSN(),
3597 },
3598 .result = ACCEPT,
3599 .prog_type = BPF_PROG_TYPE_LWT_IN,
3600 },
3601 {
3602 "direct packet read for LWT_OUT",
3603 .insns = {
3604 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3605 offsetof(struct __sk_buff, data)),
3606 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3607 offsetof(struct __sk_buff, data_end)),
3608 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3610 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3611 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3612 BPF_MOV64_IMM(BPF_REG_0, 0),
3613 BPF_EXIT_INSN(),
3614 },
3615 .result = ACCEPT,
3616 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3617 },
3618 {
3619 "direct packet read for LWT_XMIT",
3620 .insns = {
3621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3622 offsetof(struct __sk_buff, data)),
3623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3624 offsetof(struct __sk_buff, data_end)),
3625 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3627 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3628 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3629 BPF_MOV64_IMM(BPF_REG_0, 0),
3630 BPF_EXIT_INSN(),
3631 },
3632 .result = ACCEPT,
3633 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3634 },
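	/* Overlapping bounds checks against data_end: the 2-byte load at
	 * offset 6 is already covered by the wider data + 8 check, so the
	 * program is accepted.
	 */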
3635 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07003636 "overlapping checks for direct packet access",
3637 .insns = {
3638 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3639 offsetof(struct __sk_buff, data)),
3640 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3641 offsetof(struct __sk_buff, data_end)),
3642 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3644 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3647 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3648 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3649 BPF_MOV64_IMM(BPF_REG_0, 0),
3650 BPF_EXIT_INSN(),
3651 },
3652 .result = ACCEPT,
3653 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3654 },
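	/* skb->tc_classid reads are flagged as invalid bpf_context accesses
	 * in the LWT cases below.
	 */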
3655 {
Thomas Graf3f731d82016-12-05 10:30:52 +01003656 "invalid access of tc_classid for LWT_IN",
3657 .insns = {
3658 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3659 offsetof(struct __sk_buff, tc_classid)),
3660 BPF_EXIT_INSN(),
3661 },
3662 .result = REJECT,
3663 .errstr = "invalid bpf_context access",
3664 },
3665 {
3666 "invalid access of tc_classid for LWT_OUT",
3667 .insns = {
3668 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3669 offsetof(struct __sk_buff, tc_classid)),
3670 BPF_EXIT_INSN(),
3671 },
3672 .result = REJECT,
3673 .errstr = "invalid bpf_context access",
3674 },
3675 {
3676 "invalid access of tc_classid for LWT_XMIT",
3677 .insns = {
3678 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3679 offsetof(struct __sk_buff, tc_classid)),
3680 BPF_EXIT_INSN(),
3681 },
3682 .result = REJECT,
3683 .errstr = "invalid bpf_context access",
3684 },
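	/* Passing a map value as a helper memory argument: full and partial
	 * ranges are accepted, while zero-sized, out-of-bound and negative
	 * sizes are rejected.
	 */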
Gianluca Borello57225692017-01-09 10:19:47 -08003685 {
3686 "helper access to map: full range",
3687 .insns = {
3688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3690 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3691 BPF_LD_MAP_FD(BPF_REG_1, 0),
3692 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3695 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3696 BPF_MOV64_IMM(BPF_REG_3, 0),
3697 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3698 BPF_EXIT_INSN(),
3699 },
3700 .fixup_map2 = { 3 },
3701 .result = ACCEPT,
3702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3703 },
3704 {
3705 "helper access to map: partial range",
3706 .insns = {
3707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3709 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3710 BPF_LD_MAP_FD(BPF_REG_1, 0),
3711 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3714 BPF_MOV64_IMM(BPF_REG_2, 8),
3715 BPF_MOV64_IMM(BPF_REG_3, 0),
3716 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3717 BPF_EXIT_INSN(),
3718 },
3719 .fixup_map2 = { 3 },
3720 .result = ACCEPT,
3721 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3722 },
3723 {
3724 "helper access to map: empty range",
3725 .insns = {
3726 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3728 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3729 BPF_LD_MAP_FD(BPF_REG_1, 0),
3730 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3731 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3732 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3733 BPF_MOV64_IMM(BPF_REG_2, 0),
3734 BPF_MOV64_IMM(BPF_REG_3, 0),
3735 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3736 BPF_EXIT_INSN(),
3737 },
3738 .fixup_map2 = { 3 },
3739 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3740 .result = REJECT,
3741 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3742 },
3743 {
3744 "helper access to map: out-of-bound range",
3745 .insns = {
3746 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3748 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3749 BPF_LD_MAP_FD(BPF_REG_1, 0),
3750 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3751 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3753 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3754 BPF_MOV64_IMM(BPF_REG_3, 0),
3755 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3756 BPF_EXIT_INSN(),
3757 },
3758 .fixup_map2 = { 3 },
3759 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3760 .result = REJECT,
3761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3762 },
3763 {
3764 "helper access to map: negative range",
3765 .insns = {
3766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3768 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3769 BPF_LD_MAP_FD(BPF_REG_1, 0),
3770 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3773 BPF_MOV64_IMM(BPF_REG_2, -8),
3774 BPF_MOV64_IMM(BPF_REG_3, 0),
3775 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3776 BPF_EXIT_INSN(),
3777 },
3778 .fixup_map2 = { 3 },
3779 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3780 .result = REJECT,
3781 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3782 },
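	/* The same range checks, but for map value pointers adjusted by a
	 * fixed offset, whether that offset comes from an immediate, a
	 * constant register or a bounds-checked variable.
	 */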
3783 {
3784 "helper access to adjusted map (via const imm): full range",
3785 .insns = {
3786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3788 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3789 BPF_LD_MAP_FD(BPF_REG_1, 0),
3790 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3794 offsetof(struct test_val, foo)),
3795 BPF_MOV64_IMM(BPF_REG_2,
3796 sizeof(struct test_val) -
3797 offsetof(struct test_val, foo)),
3798 BPF_MOV64_IMM(BPF_REG_3, 0),
3799 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3800 BPF_EXIT_INSN(),
3801 },
3802 .fixup_map2 = { 3 },
3803 .result = ACCEPT,
3804 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3805 },
3806 {
3807 "helper access to adjusted map (via const imm): partial range",
3808 .insns = {
3809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3811 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3812 BPF_LD_MAP_FD(BPF_REG_1, 0),
3813 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3815 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3817 offsetof(struct test_val, foo)),
3818 BPF_MOV64_IMM(BPF_REG_2, 8),
3819 BPF_MOV64_IMM(BPF_REG_3, 0),
3820 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3821 BPF_EXIT_INSN(),
3822 },
3823 .fixup_map2 = { 3 },
3824 .result = ACCEPT,
3825 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3826 },
3827 {
3828 "helper access to adjusted map (via const imm): empty range",
3829 .insns = {
3830 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3832 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3833 BPF_LD_MAP_FD(BPF_REG_1, 0),
3834 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3836 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3838 offsetof(struct test_val, foo)),
3839 BPF_MOV64_IMM(BPF_REG_2, 0),
3840 BPF_MOV64_IMM(BPF_REG_3, 0),
3841 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3842 BPF_EXIT_INSN(),
3843 },
3844 .fixup_map2 = { 3 },
3845 .errstr = "R1 min value is outside of the array range",
3846 .result = REJECT,
3847 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3848 },
3849 {
3850 "helper access to adjusted map (via const imm): out-of-bound range",
3851 .insns = {
3852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3854 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3855 BPF_LD_MAP_FD(BPF_REG_1, 0),
3856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3860 offsetof(struct test_val, foo)),
3861 BPF_MOV64_IMM(BPF_REG_2,
3862 sizeof(struct test_val) -
3863 offsetof(struct test_val, foo) + 8),
3864 BPF_MOV64_IMM(BPF_REG_3, 0),
3865 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3866 BPF_EXIT_INSN(),
3867 },
3868 .fixup_map2 = { 3 },
3869 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3870 .result = REJECT,
3871 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3872 },
3873 {
3874 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3875 .insns = {
3876 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3878 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3879 BPF_LD_MAP_FD(BPF_REG_1, 0),
3880 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3881 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3882 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3884 offsetof(struct test_val, foo)),
3885 BPF_MOV64_IMM(BPF_REG_2, -8),
3886 BPF_MOV64_IMM(BPF_REG_3, 0),
3887 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3888 BPF_EXIT_INSN(),
3889 },
3890 .fixup_map2 = { 3 },
3891 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3892 .result = REJECT,
3893 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3894 },
3895 {
3896 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3897 .insns = {
3898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3900 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3901 BPF_LD_MAP_FD(BPF_REG_1, 0),
3902 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3906 offsetof(struct test_val, foo)),
3907 BPF_MOV64_IMM(BPF_REG_2, -1),
3908 BPF_MOV64_IMM(BPF_REG_3, 0),
3909 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3910 BPF_EXIT_INSN(),
3911 },
3912 .fixup_map2 = { 3 },
3913 .errstr = "R1 min value is outside of the array range",
3914 .result = REJECT,
3915 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3916 },
3917 {
3918 "helper access to adjusted map (via const reg): full range",
3919 .insns = {
3920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3922 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3923 BPF_LD_MAP_FD(BPF_REG_1, 0),
3924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3927 BPF_MOV64_IMM(BPF_REG_3,
3928 offsetof(struct test_val, foo)),
3929 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3930 BPF_MOV64_IMM(BPF_REG_2,
3931 sizeof(struct test_val) -
3932 offsetof(struct test_val, foo)),
3933 BPF_MOV64_IMM(BPF_REG_3, 0),
3934 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3935 BPF_EXIT_INSN(),
3936 },
3937 .fixup_map2 = { 3 },
3938 .result = ACCEPT,
3939 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3940 },
3941 {
3942 "helper access to adjusted map (via const reg): partial range",
3943 .insns = {
3944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3946 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3947 BPF_LD_MAP_FD(BPF_REG_1, 0),
3948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3950 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3951 BPF_MOV64_IMM(BPF_REG_3,
3952 offsetof(struct test_val, foo)),
3953 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3954 BPF_MOV64_IMM(BPF_REG_2, 8),
3955 BPF_MOV64_IMM(BPF_REG_3, 0),
3956 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3957 BPF_EXIT_INSN(),
3958 },
3959 .fixup_map2 = { 3 },
3960 .result = ACCEPT,
3961 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3962 },
3963 {
3964 "helper access to adjusted map (via const reg): empty range",
3965 .insns = {
3966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3968 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3969 BPF_LD_MAP_FD(BPF_REG_1, 0),
3970 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3971 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3972 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3973 BPF_MOV64_IMM(BPF_REG_3, 0),
3974 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3975 BPF_MOV64_IMM(BPF_REG_2, 0),
3976 BPF_MOV64_IMM(BPF_REG_3, 0),
3977 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3978 BPF_EXIT_INSN(),
3979 },
3980 .fixup_map2 = { 3 },
3981 .errstr = "R1 min value is outside of the array range",
3982 .result = REJECT,
3983 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3984 },
3985 {
3986 "helper access to adjusted map (via const reg): out-of-bound range",
3987 .insns = {
3988 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3990 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3991 BPF_LD_MAP_FD(BPF_REG_1, 0),
3992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3993 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3995 BPF_MOV64_IMM(BPF_REG_3,
3996 offsetof(struct test_val, foo)),
3997 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3998 BPF_MOV64_IMM(BPF_REG_2,
3999 sizeof(struct test_val) -
4000 offsetof(struct test_val, foo) + 8),
4001 BPF_MOV64_IMM(BPF_REG_3, 0),
4002 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4003 BPF_EXIT_INSN(),
4004 },
4005 .fixup_map2 = { 3 },
4006 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4007 .result = REJECT,
4008 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4009 },
4010 {
4011 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4012 .insns = {
4013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4015 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4016 BPF_LD_MAP_FD(BPF_REG_1, 0),
4017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4020 BPF_MOV64_IMM(BPF_REG_3,
4021 offsetof(struct test_val, foo)),
4022 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4023 BPF_MOV64_IMM(BPF_REG_2, -8),
4024 BPF_MOV64_IMM(BPF_REG_3, 0),
4025 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4026 BPF_EXIT_INSN(),
4027 },
4028 .fixup_map2 = { 3 },
4029 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
4030 .result = REJECT,
4031 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4032 },
4033 {
4034 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4035 .insns = {
4036 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4038 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4039 BPF_LD_MAP_FD(BPF_REG_1, 0),
4040 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4041 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4042 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4043 BPF_MOV64_IMM(BPF_REG_3,
4044 offsetof(struct test_val, foo)),
4045 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4046 BPF_MOV64_IMM(BPF_REG_2, -1),
4047 BPF_MOV64_IMM(BPF_REG_3, 0),
4048 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4049 BPF_EXIT_INSN(),
4050 },
4051 .fixup_map2 = { 3 },
4052 .errstr = "R1 min value is outside of the array range",
4053 .result = REJECT,
4054 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4055 },
4056 {
4057 "helper access to adjusted map (via variable): full range",
4058 .insns = {
4059 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4061 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4062 BPF_LD_MAP_FD(BPF_REG_1, 0),
4063 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4066 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4067 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4068 offsetof(struct test_val, foo), 4),
4069 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4070 BPF_MOV64_IMM(BPF_REG_2,
4071 sizeof(struct test_val) -
4072 offsetof(struct test_val, foo)),
4073 BPF_MOV64_IMM(BPF_REG_3, 0),
4074 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4075 BPF_EXIT_INSN(),
4076 },
4077 .fixup_map2 = { 3 },
4078 .result = ACCEPT,
4079 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4080 },
4081 {
4082 "helper access to adjusted map (via variable): partial range",
4083 .insns = {
4084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4087 BPF_LD_MAP_FD(BPF_REG_1, 0),
4088 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4090 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4091 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4092 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4093 offsetof(struct test_val, foo), 4),
4094 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4095 BPF_MOV64_IMM(BPF_REG_2, 8),
4096 BPF_MOV64_IMM(BPF_REG_3, 0),
4097 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4098 BPF_EXIT_INSN(),
4099 },
4100 .fixup_map2 = { 3 },
4101 .result = ACCEPT,
4102 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4103 },
4104 {
4105 "helper access to adjusted map (via variable): empty range",
4106 .insns = {
4107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4109 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4110 BPF_LD_MAP_FD(BPF_REG_1, 0),
4111 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4112 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4113 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4114 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4115 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4116 offsetof(struct test_val, foo), 4),
4117 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4118 BPF_MOV64_IMM(BPF_REG_2, 0),
4119 BPF_MOV64_IMM(BPF_REG_3, 0),
4120 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4121 BPF_EXIT_INSN(),
4122 },
4123 .fixup_map2 = { 3 },
4124 .errstr = "R1 min value is outside of the array range",
4125 .result = REJECT,
4126 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4127 },
4128 {
4129 "helper access to adjusted map (via variable): no max check",
4130 .insns = {
4131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4133 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4134 BPF_LD_MAP_FD(BPF_REG_1, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4137 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4138 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4139 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4140 BPF_MOV64_IMM(BPF_REG_2, 0),
4141 BPF_MOV64_IMM(BPF_REG_3, 0),
4142 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4143 BPF_EXIT_INSN(),
4144 },
4145 .fixup_map2 = { 3 },
4146 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
4147 .result = REJECT,
4148 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4149 },
4150 {
4151 "helper access to adjusted map (via variable): wrong max check",
4152 .insns = {
4153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4155 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4156 BPF_LD_MAP_FD(BPF_REG_1, 0),
4157 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4160 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4161 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4162 offsetof(struct test_val, foo), 4),
4163 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4164 BPF_MOV64_IMM(BPF_REG_2,
4165 sizeof(struct test_val) -
4166 offsetof(struct test_val, foo) + 1),
4167 BPF_MOV64_IMM(BPF_REG_3, 0),
4168 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4169 BPF_EXIT_INSN(),
4170 },
4171 .fixup_map2 = { 3 },
4172 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4173 .result = REJECT,
4174 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4175 },
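	/* Map value pointer tracking: spilling the (possibly NULL) pointer to
	 * the stack must preserve its type, misaligned accesses need
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, and AND/32-bit ADD/DIV/endian/
	 * XADD ops turn the pointer into an unknown value ('inv') that can no
	 * longer be dereferenced.
	 */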
Gianluca Borellof0318d02017-01-09 10:19:48 -08004176 {
4177 "map element value is preserved across register spilling",
4178 .insns = {
4179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4181 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4182 BPF_LD_MAP_FD(BPF_REG_1, 0),
4183 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4185 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4188 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4189 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4190 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4191 BPF_EXIT_INSN(),
4192 },
4193 .fixup_map2 = { 3 },
4194 .errstr_unpriv = "R0 leaks addr",
4195 .result = ACCEPT,
4196 .result_unpriv = REJECT,
4197 },
4198 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004199 "map element value or null is marked on register spilling",
4200 .insns = {
4201 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4203 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4204 BPF_LD_MAP_FD(BPF_REG_1, 0),
4205 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4208 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4210 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4211 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4212 BPF_EXIT_INSN(),
4213 },
4214 .fixup_map2 = { 3 },
4215 .errstr_unpriv = "R0 leaks addr",
4216 .result = ACCEPT,
4217 .result_unpriv = REJECT,
4218 },
4219 {
4220 "map element value store of cleared call register",
4221 .insns = {
4222 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4224 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4225 BPF_LD_MAP_FD(BPF_REG_1, 0),
4226 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4228 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4229 BPF_EXIT_INSN(),
4230 },
4231 .fixup_map2 = { 3 },
4232 .errstr_unpriv = "R1 !read_ok",
4233 .errstr = "R1 !read_ok",
4234 .result = REJECT,
4235 .result_unpriv = REJECT,
4236 },
4237 {
4238 "map element value with unaligned store",
4239 .insns = {
4240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4242 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4243 BPF_LD_MAP_FD(BPF_REG_1, 0),
4244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4247 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4248 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4249 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4250 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4251 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4252 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4253 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4255 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4256 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4257 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4258 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4260 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4261 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4262 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4263 BPF_EXIT_INSN(),
4264 },
4265 .fixup_map2 = { 3 },
4266 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4267 .result = ACCEPT,
4268 .result_unpriv = REJECT,
4269 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4270 },
4271 {
4272 "map element value with unaligned load",
4273 .insns = {
4274 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4276 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4277 BPF_LD_MAP_FD(BPF_REG_1, 0),
4278 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4279 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4280 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4281 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4283 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4284 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4285 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4286 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4287 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4289 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4290 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4291 BPF_EXIT_INSN(),
4292 },
4293 .fixup_map2 = { 3 },
4294 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4295 .result = ACCEPT,
4296 .result_unpriv = REJECT,
4297 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4298 },
4299 {
4300 "map element value illegal alu op, 1",
4301 .insns = {
4302 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4304 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4305 BPF_LD_MAP_FD(BPF_REG_1, 0),
4306 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4307 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4308 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4309 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4310 BPF_EXIT_INSN(),
4311 },
4312 .fixup_map2 = { 3 },
4313 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4314 .errstr = "invalid mem access 'inv'",
4315 .result = REJECT,
4316 .result_unpriv = REJECT,
4317 },
4318 {
4319 "map element value illegal alu op, 2",
4320 .insns = {
4321 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4323 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4324 BPF_LD_MAP_FD(BPF_REG_1, 0),
4325 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4326 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4327 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4328 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4329 BPF_EXIT_INSN(),
4330 },
4331 .fixup_map2 = { 3 },
4332 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4333 .errstr = "invalid mem access 'inv'",
4334 .result = REJECT,
4335 .result_unpriv = REJECT,
4336 },
4337 {
4338 "map element value illegal alu op, 3",
4339 .insns = {
4340 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4342 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4343 BPF_LD_MAP_FD(BPF_REG_1, 0),
4344 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4346 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4347 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4348 BPF_EXIT_INSN(),
4349 },
4350 .fixup_map2 = { 3 },
4351 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4352 .errstr = "invalid mem access 'inv'",
4353 .result = REJECT,
4354 .result_unpriv = REJECT,
4355 },
4356 {
4357 "map element value illegal alu op, 4",
4358 .insns = {
4359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4361 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4362 BPF_LD_MAP_FD(BPF_REG_1, 0),
4363 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4365 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4366 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4367 BPF_EXIT_INSN(),
4368 },
4369 .fixup_map2 = { 3 },
4370 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4371 .errstr = "invalid mem access 'inv'",
4372 .result = REJECT,
4373 .result_unpriv = REJECT,
4374 },
4375 {
4376 "map element value illegal alu op, 5",
4377 .insns = {
4378 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4380 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4381 BPF_LD_MAP_FD(BPF_REG_1, 0),
4382 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4383 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4384 BPF_MOV64_IMM(BPF_REG_3, 4096),
4385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4387 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4388 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4390 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4391 BPF_EXIT_INSN(),
4392 },
4393 .fixup_map2 = { 3 },
4394 .errstr_unpriv = "R0 invalid mem access 'inv'",
4395 .errstr = "R0 invalid mem access 'inv'",
4396 .result = REJECT,
4397 .result_unpriv = REJECT,
4398 },
4399 {
4400 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08004401 .insns = {
4402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4404 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4405 BPF_LD_MAP_FD(BPF_REG_1, 0),
4406 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
4409 offsetof(struct test_val, foo)),
4410 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4413 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4414 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4415 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4416 BPF_EXIT_INSN(),
4417 },
4418 .fixup_map2 = { 3 },
4419 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4420 .result = ACCEPT,
4421 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004422 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08004423 },
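	/* Variable-length helper access: the size register passed to
	 * probe_read() must be provably bounded (via an AND mask or
	 * conditional jumps) before the call; zero-sized stack accesses and
	 * unbounded or possibly-negative sizes are rejected.
	 */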
Gianluca Borello06c1c042017-01-09 10:19:49 -08004424 {
4425 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
4426 .insns = {
4427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4429 BPF_MOV64_IMM(BPF_REG_0, 0),
4430 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4432 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4435 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4436 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4437 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4438 BPF_MOV64_IMM(BPF_REG_2, 16),
4439 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4440 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4441 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4442 BPF_MOV64_IMM(BPF_REG_4, 0),
4443 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4444 BPF_MOV64_IMM(BPF_REG_3, 0),
4445 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4446 BPF_MOV64_IMM(BPF_REG_0, 0),
4447 BPF_EXIT_INSN(),
4448 },
4449 .result = ACCEPT,
4450 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4451 },
4452 {
4453 "helper access to variable memory: stack, bitwise AND, zero included",
4454 .insns = {
4455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4457 BPF_MOV64_IMM(BPF_REG_2, 16),
4458 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4459 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4460 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4461 BPF_MOV64_IMM(BPF_REG_3, 0),
4462 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4463 BPF_EXIT_INSN(),
4464 },
4465 .errstr = "invalid stack type R1 off=-64 access_size=0",
4466 .result = REJECT,
4467 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4468 },
4469 {
4470 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4471 .insns = {
4472 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4474 BPF_MOV64_IMM(BPF_REG_2, 16),
4475 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4476 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4477 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4478 BPF_MOV64_IMM(BPF_REG_4, 0),
4479 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4480 BPF_MOV64_IMM(BPF_REG_3, 0),
4481 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4482 BPF_MOV64_IMM(BPF_REG_0, 0),
4483 BPF_EXIT_INSN(),
4484 },
4485 .errstr = "invalid stack type R1 off=-64 access_size=65",
4486 .result = REJECT,
4487 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4488 },
4489 {
4490 "helper access to variable memory: stack, JMP, correct bounds",
4491 .insns = {
4492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4494 BPF_MOV64_IMM(BPF_REG_0, 0),
4495 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4496 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4497 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4498 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4500 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4501 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4502 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4503 BPF_MOV64_IMM(BPF_REG_2, 16),
4504 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4505 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4506 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4507 BPF_MOV64_IMM(BPF_REG_4, 0),
4508 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4509 BPF_MOV64_IMM(BPF_REG_3, 0),
4510 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4511 BPF_MOV64_IMM(BPF_REG_0, 0),
4512 BPF_EXIT_INSN(),
4513 },
4514 .result = ACCEPT,
4515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4516 },
4517 {
4518 "helper access to variable memory: stack, JMP (signed), correct bounds",
4519 .insns = {
4520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4522 BPF_MOV64_IMM(BPF_REG_0, 0),
4523 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4524 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4525 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4526 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4527 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4528 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4529 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4530 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4531 BPF_MOV64_IMM(BPF_REG_2, 16),
4532 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4533 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4534 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4535 BPF_MOV64_IMM(BPF_REG_4, 0),
4536 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4537 BPF_MOV64_IMM(BPF_REG_3, 0),
4538 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4539 BPF_MOV64_IMM(BPF_REG_0, 0),
4540 BPF_EXIT_INSN(),
4541 },
4542 .result = ACCEPT,
4543 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4544 },
4545 {
4546 "helper access to variable memory: stack, JMP, bounds + offset",
4547 .insns = {
4548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4550 BPF_MOV64_IMM(BPF_REG_2, 16),
4551 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4552 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4553 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4554 BPF_MOV64_IMM(BPF_REG_4, 0),
4555 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4557 BPF_MOV64_IMM(BPF_REG_3, 0),
4558 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4559 BPF_MOV64_IMM(BPF_REG_0, 0),
4560 BPF_EXIT_INSN(),
4561 },
4562 .errstr = "invalid stack type R1 off=-64 access_size=65",
4563 .result = REJECT,
4564 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4565 },
4566 {
4567 "helper access to variable memory: stack, JMP, wrong max",
4568 .insns = {
4569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4571 BPF_MOV64_IMM(BPF_REG_2, 16),
4572 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4573 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4574 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4575 BPF_MOV64_IMM(BPF_REG_4, 0),
4576 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4577 BPF_MOV64_IMM(BPF_REG_3, 0),
4578 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4579 BPF_MOV64_IMM(BPF_REG_0, 0),
4580 BPF_EXIT_INSN(),
4581 },
4582 .errstr = "invalid stack type R1 off=-64 access_size=65",
4583 .result = REJECT,
4584 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4585 },
4586 {
4587 "helper access to variable memory: stack, JMP, no max check",
4588 .insns = {
4589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4591 BPF_MOV64_IMM(BPF_REG_2, 16),
4592 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4594 BPF_MOV64_IMM(BPF_REG_4, 0),
4595 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4596 BPF_MOV64_IMM(BPF_REG_3, 0),
4597 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4598 BPF_MOV64_IMM(BPF_REG_0, 0),
4599 BPF_EXIT_INSN(),
4600 },
4601 .errstr = "R2 unbounded memory access",
4602 .result = REJECT,
4603 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4604 },
4605 {
4606 "helper access to variable memory: stack, JMP, no min check",
4607 .insns = {
4608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4610 BPF_MOV64_IMM(BPF_REG_2, 16),
4611 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4612 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4613 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4614 BPF_MOV64_IMM(BPF_REG_3, 0),
4615 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4616 BPF_MOV64_IMM(BPF_REG_0, 0),
4617 BPF_EXIT_INSN(),
4618 },
4619 .errstr = "invalid stack type R1 off=-64 access_size=0",
4620 .result = REJECT,
4621 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4622 },
4623 {
4624 "helper access to variable memory: stack, JMP (signed), no min check",
4625 .insns = {
4626 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4628 BPF_MOV64_IMM(BPF_REG_2, 16),
4629 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4630 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4631 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4632 BPF_MOV64_IMM(BPF_REG_3, 0),
4633 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4634 BPF_MOV64_IMM(BPF_REG_0, 0),
4635 BPF_EXIT_INSN(),
4636 },
4637 .errstr = "R2 min value is negative",
4638 .result = REJECT,
4639 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4640 },
4641 {
4642 "helper access to variable memory: map, JMP, correct bounds",
4643 .insns = {
4644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4646 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4647 BPF_LD_MAP_FD(BPF_REG_1, 0),
4648 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4651 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4652 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4653 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4654 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4655 sizeof(struct test_val), 4),
4656 BPF_MOV64_IMM(BPF_REG_4, 0),
4657 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4658 BPF_MOV64_IMM(BPF_REG_3, 0),
4659 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4660 BPF_MOV64_IMM(BPF_REG_0, 0),
4661 BPF_EXIT_INSN(),
4662 },
4663 .fixup_map2 = { 3 },
4664 .result = ACCEPT,
4665 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4666 },
4667 {
4668 "helper access to variable memory: map, JMP, wrong max",
4669 .insns = {
4670 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4672 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4673 BPF_LD_MAP_FD(BPF_REG_1, 0),
4674 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4676 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4677 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4678 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4679 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4680 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4681 sizeof(struct test_val) + 1, 4),
4682 BPF_MOV64_IMM(BPF_REG_4, 0),
4683 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4684 BPF_MOV64_IMM(BPF_REG_3, 0),
4685 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4686 BPF_MOV64_IMM(BPF_REG_0, 0),
4687 BPF_EXIT_INSN(),
4688 },
4689 .fixup_map2 = { 3 },
4690 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4691 .result = REJECT,
4692 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4693 },
4694 {
4695 "helper access to variable memory: map adjusted, JMP, correct bounds",
4696 .insns = {
4697 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4700 BPF_LD_MAP_FD(BPF_REG_1, 0),
4701 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4702 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4705 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4706 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4707 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4708 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4709 sizeof(struct test_val) - 20, 4),
4710 BPF_MOV64_IMM(BPF_REG_4, 0),
4711 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4712 BPF_MOV64_IMM(BPF_REG_3, 0),
4713 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4714 BPF_MOV64_IMM(BPF_REG_0, 0),
4715 BPF_EXIT_INSN(),
4716 },
4717 .fixup_map2 = { 3 },
4718 .result = ACCEPT,
4719 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4720 },
4721 {
4722 "helper access to variable memory: map adjusted, JMP, wrong max",
4723 .insns = {
4724 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4726 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4727 BPF_LD_MAP_FD(BPF_REG_1, 0),
4728 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4729 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4730 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4732 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4733 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4734 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4735 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4736 sizeof(struct test_val) - 19, 4),
4737 BPF_MOV64_IMM(BPF_REG_4, 0),
4738 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4739 BPF_MOV64_IMM(BPF_REG_3, 0),
4740 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4741 BPF_MOV64_IMM(BPF_REG_0, 0),
4742 BPF_EXIT_INSN(),
4743 },
4744 .fixup_map2 = { 3 },
4745 .errstr = "R1 min value is outside of the array range",
4746 .result = REJECT,
4747 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4748 },
4749 {
4750 "helper access to variable memory: size > 0 not allowed on NULL",
4751 .insns = {
4752 BPF_MOV64_IMM(BPF_REG_1, 0),
4753 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004754 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4755 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004756 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4757 BPF_MOV64_IMM(BPF_REG_3, 0),
4758 BPF_MOV64_IMM(BPF_REG_4, 0),
4759 BPF_MOV64_IMM(BPF_REG_5, 0),
4760 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4761 BPF_EXIT_INSN(),
4762 },
4763 .errstr = "R1 type=imm expected=fp",
4764 .result = REJECT,
4765 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4766 },
4767 {
4768 "helper access to variable memory: size = 0 not allowed on != NULL",
4769 .insns = {
4770 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4772 BPF_MOV64_IMM(BPF_REG_2, 0),
4773 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4774 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4775 BPF_MOV64_IMM(BPF_REG_3, 0),
4776 BPF_MOV64_IMM(BPF_REG_4, 0),
4777 BPF_MOV64_IMM(BPF_REG_5, 0),
4778 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4779 BPF_EXIT_INSN(),
4780 },
4781 .errstr = "invalid stack type R1 off=-8 access_size=0",
4782 .result = REJECT,
4783 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4784 },
4785 {
4786 "helper access to variable memory: 8 bytes leak",
4787 .insns = {
4788 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4790 BPF_MOV64_IMM(BPF_REG_0, 0),
4791 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4792 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4793 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4794 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4798 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4800 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004801 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4803 BPF_MOV64_IMM(BPF_REG_3, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4805 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4806 BPF_EXIT_INSN(),
4807 },
4808 .errstr = "invalid indirect read from stack off -64+32 size 64",
4809 .result = REJECT,
4810 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4811 },
4812 {
4813 "helper access to variable memory: 8 bytes no leak (init memory)",
4814 .insns = {
4815 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4816 BPF_MOV64_IMM(BPF_REG_0, 0),
4817 BPF_MOV64_IMM(BPF_REG_0, 0),
4818 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4819 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4820 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4821 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4822 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4823 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4824 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4825 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4827 BPF_MOV64_IMM(BPF_REG_2, 0),
4828 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4830 BPF_MOV64_IMM(BPF_REG_3, 0),
4831 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4832 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4833 BPF_EXIT_INSN(),
4834 },
4835 .result = ACCEPT,
4836 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4837 },
Josef Bacik29200c12017-02-03 16:25:23 -05004838 {
4839 "invalid and of negative number",
4840 .insns = {
4841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4844 BPF_LD_MAP_FD(BPF_REG_1, 0),
4845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4846 BPF_FUNC_map_lookup_elem),
4847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4848 BPF_MOV64_IMM(BPF_REG_1, 6),
4849 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4850 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4851 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4852 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4853 offsetof(struct test_val, foo)),
4854 BPF_EXIT_INSN(),
4855 },
4856 .fixup_map2 = { 3 },
4857 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4858 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4859 .result = REJECT,
4860 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004861 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05004862 },
4863 {
4864 "invalid range check",
4865 .insns = {
4866 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4867 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4869 BPF_LD_MAP_FD(BPF_REG_1, 0),
4870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4871 BPF_FUNC_map_lookup_elem),
4872 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4873 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4874 BPF_MOV64_IMM(BPF_REG_9, 1),
4875 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4876 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4877 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4878 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4879 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4880 BPF_MOV32_IMM(BPF_REG_3, 1),
4881 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4882 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4883 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4884 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4885			BPF_MOV64_IMM(BPF_REG_0, 0),
4886 BPF_EXIT_INSN(),
4887 },
4888 .fixup_map2 = { 3 },
4889 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4890 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4891 .result = REJECT,
4892 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004893 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004894 },
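	/*
	 * Map-in-map tests: fixup_map_in_map patches in an ARRAY_OF_MAPS fd,
	 * so the value returned by the outer lookup is an inner map pointer
	 * that must be NULL-checked and must not be modified before it is
	 * passed to bpf_map_lookup_elem() again.
	 */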
4895 {
4896 "map in map access",
4897 .insns = {
4898 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4901 BPF_LD_MAP_FD(BPF_REG_1, 0),
4902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4903 BPF_FUNC_map_lookup_elem),
4904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4905 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4906 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4910 BPF_FUNC_map_lookup_elem),
4911			BPF_MOV64_IMM(BPF_REG_0, 0),
4912 BPF_EXIT_INSN(),
4913 },
4914 .fixup_map_in_map = { 3 },
4915 .result = ACCEPT,
4916 },
4917 {
4918 "invalid inner map pointer",
4919 .insns = {
4920 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4923 BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4925 BPF_FUNC_map_lookup_elem),
4926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4927 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4928 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4930 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4933 BPF_FUNC_map_lookup_elem),
4934			BPF_MOV64_IMM(BPF_REG_0, 0),
4935 BPF_EXIT_INSN(),
4936 },
4937 .fixup_map_in_map = { 3 },
4938 .errstr = "R1 type=inv expected=map_ptr",
4939 .errstr_unpriv = "R1 pointer arithmetic prohibited",
4940 .result = REJECT,
4941 },
4942 {
4943 "forgot null checking on the inner map pointer",
4944 .insns = {
4945 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4946 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4948 BPF_LD_MAP_FD(BPF_REG_1, 0),
4949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4950 BPF_FUNC_map_lookup_elem),
4951 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4952 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4954 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4956 BPF_FUNC_map_lookup_elem),
4957			BPF_MOV64_IMM(BPF_REG_0, 0),
4958 BPF_EXIT_INSN(),
4959 },
4960 .fixup_map_in_map = { 3 },
4961 .errstr = "R1 type=map_value_or_null expected=map_ptr",
4962 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004963 },
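	/*
	 * BPF_LD_ABS/BPF_LD_IND implicitly clobber the caller-saved
	 * registers R1-R5; the tests below verify that reading any of them
	 * after the load is rejected, while a callee-saved register such as
	 * R7 is preserved.
	 */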
4964 {
4965 "ld_abs: check calling conv, r1",
4966 .insns = {
4967 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4968 BPF_MOV64_IMM(BPF_REG_1, 0),
4969 BPF_LD_ABS(BPF_W, -0x200000),
4970 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4971 BPF_EXIT_INSN(),
4972 },
4973 .errstr = "R1 !read_ok",
4974 .result = REJECT,
4975 },
4976 {
4977 "ld_abs: check calling conv, r2",
4978 .insns = {
4979 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4980 BPF_MOV64_IMM(BPF_REG_2, 0),
4981 BPF_LD_ABS(BPF_W, -0x200000),
4982 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4983 BPF_EXIT_INSN(),
4984 },
4985 .errstr = "R2 !read_ok",
4986 .result = REJECT,
4987 },
4988 {
4989 "ld_abs: check calling conv, r3",
4990 .insns = {
4991 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4992 BPF_MOV64_IMM(BPF_REG_3, 0),
4993 BPF_LD_ABS(BPF_W, -0x200000),
4994 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
4995 BPF_EXIT_INSN(),
4996 },
4997 .errstr = "R3 !read_ok",
4998 .result = REJECT,
4999 },
5000 {
5001 "ld_abs: check calling conv, r4",
5002 .insns = {
5003 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5004 BPF_MOV64_IMM(BPF_REG_4, 0),
5005 BPF_LD_ABS(BPF_W, -0x200000),
5006 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5007 BPF_EXIT_INSN(),
5008 },
5009 .errstr = "R4 !read_ok",
5010 .result = REJECT,
5011 },
5012 {
5013 "ld_abs: check calling conv, r5",
5014 .insns = {
5015 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5016 BPF_MOV64_IMM(BPF_REG_5, 0),
5017 BPF_LD_ABS(BPF_W, -0x200000),
5018 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5019 BPF_EXIT_INSN(),
5020 },
5021 .errstr = "R5 !read_ok",
5022 .result = REJECT,
5023 },
5024 {
5025 "ld_abs: check calling conv, r7",
5026 .insns = {
5027 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5028 BPF_MOV64_IMM(BPF_REG_7, 0),
5029 BPF_LD_ABS(BPF_W, -0x200000),
5030 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5031 BPF_EXIT_INSN(),
5032 },
5033 .result = ACCEPT,
5034 },
5035 {
5036 "ld_ind: check calling conv, r1",
5037 .insns = {
5038 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5039 BPF_MOV64_IMM(BPF_REG_1, 1),
5040 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5041 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5042 BPF_EXIT_INSN(),
5043 },
5044 .errstr = "R1 !read_ok",
5045 .result = REJECT,
5046 },
5047 {
5048 "ld_ind: check calling conv, r2",
5049 .insns = {
5050 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5051 BPF_MOV64_IMM(BPF_REG_2, 1),
5052 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5053 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5054 BPF_EXIT_INSN(),
5055 },
5056 .errstr = "R2 !read_ok",
5057 .result = REJECT,
5058 },
5059 {
5060 "ld_ind: check calling conv, r3",
5061 .insns = {
5062 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5063 BPF_MOV64_IMM(BPF_REG_3, 1),
5064 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5065 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5066 BPF_EXIT_INSN(),
5067 },
5068 .errstr = "R3 !read_ok",
5069 .result = REJECT,
5070 },
5071 {
5072 "ld_ind: check calling conv, r4",
5073 .insns = {
5074 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5075 BPF_MOV64_IMM(BPF_REG_4, 1),
5076 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5077 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5078 BPF_EXIT_INSN(),
5079 },
5080 .errstr = "R4 !read_ok",
5081 .result = REJECT,
5082 },
5083 {
5084 "ld_ind: check calling conv, r5",
5085 .insns = {
5086 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5087 BPF_MOV64_IMM(BPF_REG_5, 1),
5088 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5089 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5090 BPF_EXIT_INSN(),
5091 },
5092 .errstr = "R5 !read_ok",
5093 .result = REJECT,
5094 },
5095 {
5096 "ld_ind: check calling conv, r7",
5097 .insns = {
5098 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5099 BPF_MOV64_IMM(BPF_REG_7, 1),
5100 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5101 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5102 BPF_EXIT_INSN(),
5103 },
5104 .result = ACCEPT,
5105 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005106};
5107
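/*
 * The insns arrays are fixed at MAX_INSNS entries and zero-padded, so the
 * effective program length is found by scanning backwards for the last
 * instruction with a non-zero opcode or immediate.
 */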
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005108static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005109{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005110 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005111
5112 for (len = MAX_INSNS - 1; len > 0; --len)
5113 if (fp[len].code != 0 || fp[len].imm != 0)
5114 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005115 return len + 1;
5116}
5117
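/*
 * Generic hash map used by the fixup_map* entries; BPF_F_NO_PREALLOC means
 * elements are only allocated on demand, which is fine since the tests
 * never actually insert anything.
 */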
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005118static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005119{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005120 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005121
Mickaël Salaünf4874d02017-02-10 00:21:43 +01005122 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005123 size_value, max_elem, BPF_F_NO_PREALLOC);
5124 if (fd < 0)
5125 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005126
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005127 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005128}
5129
5130static int create_prog_array(void)
5131{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005132 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005133
Mickaël Salaünf4874d02017-02-10 00:21:43 +01005134 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005135 sizeof(int), 4, 0);
5136 if (fd < 0)
5137 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005138
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005139 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005140}
5141
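/*
 * Build a one-element BPF_MAP_TYPE_ARRAY_OF_MAPS; the inner array fd is
 * only needed at creation time so the kernel can record the inner map's
 * metadata, hence it can be closed right away.
 */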
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005142static int create_map_in_map(void)
5143{
5144 int inner_map_fd, outer_map_fd;
5145
5146 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
5147 sizeof(int), 1, 0);
5148 if (inner_map_fd < 0) {
5149 printf("Failed to create array '%s'!\n", strerror(errno));
5150 return inner_map_fd;
5151 }
5152
5153 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
5154 sizeof(int), inner_map_fd, 1, 0);
5155 if (outer_map_fd < 0)
5156 printf("Failed to create array of maps '%s'!\n",
5157 strerror(errno));
5158
5159 close(inner_map_fd);
5160
5161 return outer_map_fd;
5162}
5163
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005164static char bpf_vlog[32768];
5165
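/*
 * Resolve the map fd placeholders: each fixup_* array lists the indices of
 * BPF_LD_MAP_FD instructions whose imm field gets rewritten with the fd of
 * a freshly created map of the matching type.
 */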
5166static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005167 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005168{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005169 int *fixup_map1 = test->fixup_map1;
5170 int *fixup_map2 = test->fixup_map2;
5171 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005172 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005173
5174 /* Allocating HTs with 1 elem is fine here, since we only test
5175 * for verifier and not do a runtime lookup, so the only thing
5176 * that really matters is value size in this case.
5177 */
5178 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005179 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005180 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005181 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005182 fixup_map1++;
5183 } while (*fixup_map1);
5184 }
5185
5186 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005187 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005188 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005189 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005190 fixup_map2++;
5191 } while (*fixup_map2);
5192 }
5193
5194 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005195 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005196 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005197 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005198 fixup_prog++;
5199 } while (*fixup_prog);
5200 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005201
5202 if (*fixup_map_in_map) {
5203 map_fds[3] = create_map_in_map();
5204 do {
5205 prog[*fixup_map_in_map].imm = map_fds[3];
5206 fixup_map_in_map++;
5207 } while (*fixup_map_in_map);
5208 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005209}
5210
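/*
 * Load one test program via bpf_verify_program() and compare the outcome
 * against the expected result (the *_unpriv variants take precedence for
 * unprivileged runs). Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may
 * legitimately be rejected on strict-alignment architectures; such runs are
 * reported as OK with a note instead of failing.
 */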
5211static void do_test_single(struct bpf_test *test, bool unpriv,
5212 int *passes, int *errors)
5213{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005214 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005215 struct bpf_insn *prog = test->insns;
5216 int prog_len = probe_filter_length(prog);
5217 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005218 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005219 const char *expected_err;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005220 int i;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005221
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005222 for (i = 0; i < MAX_NR_MAPS; i++)
5223 map_fds[i] = -1;
5224
5225 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005226
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005227 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
5228 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
5229 "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005230
5231 expected_ret = unpriv && test->result_unpriv != UNDEF ?
5232 test->result_unpriv : test->result;
5233 expected_err = unpriv && test->errstr_unpriv ?
5234 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005235
5236 reject_from_alignment = fd_prog < 0 &&
5237 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
5238 strstr(bpf_vlog, "Unknown alignment.");
5239#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
5240 if (reject_from_alignment) {
5241 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
5242 strerror(errno));
5243 goto fail_log;
5244 }
5245#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005246 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005247 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005248 printf("FAIL\nFailed to load prog '%s'!\n",
5249 strerror(errno));
5250 goto fail_log;
5251 }
5252 } else {
5253 if (fd_prog >= 0) {
5254 printf("FAIL\nUnexpected success to load!\n");
5255 goto fail_log;
5256 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005257 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005258 printf("FAIL\nUnexpected error message!\n");
5259 goto fail_log;
5260 }
5261 }
5262
5263 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005264 printf("OK%s\n", reject_from_alignment ?
5265 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005266close_fds:
5267 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07005268 for (i = 0; i < MAX_NR_MAPS; i++)
5269 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005270 sched_yield();
5271 return;
5272fail_log:
5273 (*errors)++;
5274 printf("%s", bpf_vlog);
5275 goto close_fds;
5276}
5277
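/* Return true when CAP_SYS_ADMIN is present in the effective capability set. */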
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005278static bool is_admin(void)
5279{
5280 cap_t caps;
5281 cap_flag_value_t sysadmin = CAP_CLEAR;
5282 const cap_value_t cap_val = CAP_SYS_ADMIN;
5283
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08005284#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005285 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
5286 perror("cap_get_flag");
5287 return false;
5288 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08005289#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005290 caps = cap_get_proc();
5291 if (!caps) {
5292 perror("cap_get_proc");
5293 return false;
5294 }
5295 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
5296 perror("cap_get_flag");
5297 if (cap_free(caps))
5298 perror("cap_free");
5299 return (sysadmin == CAP_SET);
5300}
5301
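/*
 * Raise or drop CAP_SYS_ADMIN in the effective set so that a privileged run
 * can also exercise each test's unprivileged ("/u") path.
 */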
5302static int set_admin(bool admin)
5303{
5304 cap_t caps;
5305 const cap_value_t cap_val = CAP_SYS_ADMIN;
5306 int ret = -1;
5307
5308 caps = cap_get_proc();
5309 if (!caps) {
5310 perror("cap_get_proc");
5311 return -1;
5312 }
5313 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
5314 admin ? CAP_SET : CAP_CLEAR)) {
5315 perror("cap_set_flag");
5316 goto out;
5317 }
5318 if (cap_set_proc(caps)) {
5319 perror("cap_set_proc");
5320 goto out;
5321 }
5322 ret = 0;
5323out:
5324 if (cap_free(caps))
5325 perror("cap_free");
5326 return ret;
5327}
5328
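/*
 * Run tests in the range [from, to): the unprivileged ("/u") pass is only
 * done for the default (socket filter) program type, and the privileged
 * ("/p") pass only when the caller actually has CAP_SYS_ADMIN.
 */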
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005329static int do_test(bool unpriv, unsigned int from, unsigned int to)
5330{
5331 int i, passes = 0, errors = 0;
5332
5333 for (i = from; i < to; i++) {
5334 struct bpf_test *test = &tests[i];
5335
5336		/* Program types that are not supported by non-root users
5337		 * are skipped right away for the unprivileged pass.
5338		 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005339 if (!test->prog_type) {
5340 if (!unpriv)
5341 set_admin(false);
5342 printf("#%d/u %s ", i, test->descr);
5343 do_test_single(test, true, &passes, &errors);
5344 if (!unpriv)
5345 set_admin(true);
5346 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005347
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005348 if (!unpriv) {
5349 printf("#%d/p %s ", i, test->descr);
5350 do_test_single(test, false, &passes, &errors);
5351 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005352 }
5353
5354 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +02005355 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005356}
5357
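/*
 * Usage: test_verifier [from [to]]; with no arguments all tests run, a
 * single index runs just that test, and two indices run the inclusive
 * range [from, to].
 */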
5358int main(int argc, char **argv)
5359{
5360 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
5361 struct rlimit rlim = { 1 << 20, 1 << 20 };
5362 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005363 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005364
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005365 if (argc == 3) {
5366 unsigned int l = atoi(argv[argc - 2]);
5367 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005368
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005369 if (l < to && u < to) {
5370 from = l;
5371 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005372 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005373 } else if (argc == 2) {
5374 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005375
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005376 if (t < to) {
5377 from = t;
5378 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005379 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005380 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005381
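	/* BPF map and program memory is charged against RLIMIT_MEMLOCK, so
	 * privileged runs lift the limit entirely while unprivileged runs
	 * keep a modest 1 MB cap.
	 */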
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005382 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
5383 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005384}