/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)

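/* Each entry below is one self-contained verifier test case.  A rough
 * sketch of how the harness uses these fields (the test runner further
 * down in this file is the authoritative reference):
 *
 *   - insns[]                  raw eBPF program to load
 *   - fixup_map1/map2/prog/
 *     map_in_map               instruction indices whose map fd immediate
 *                              is patched with a freshly created map of
 *                              the matching type before loading
 *   - errstr/errstr_unpriv     substring expected in the verifier log
 *   - result/result_unpriv     expected load outcome as root/unprivileged
 *   - flags                    e.g. F_NEEDS_EFFICIENT_UNALIGNED_ACCESS to
 *                              skip alignment-sensitive cases on
 *                              strict-alignment architectures
 */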
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

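/* The tests below poke at the verifier with hand-written instruction
 * sequences.  As a reminder of the eBPF calling convention they rely on:
 * R0 holds the return value, R1-R5 carry arguments (R1 is the context
 * pointer on entry), R6-R9 are callee saved across helper calls, and
 * R10 is the read-only frame pointer.
 */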
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
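	/* The next few tests exercise spilling registers to the stack and
	 * filling them back: a spilled pointer must keep its type, and any
	 * partial overwrite of the 8-byte spill slot must invalidate it
	 * ("corrupted spill" / "attempt to corrupt spilled" below).
	 */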
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
	},
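	/* "jump test" 1-5 stress branch tracking: every path through a tree
	 * of conditional and unconditional jumps must still leave the stack
	 * and registers in a state the verifier accepts.
	 */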
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
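	/* __sk_buff field tests: reads of the fields exposed to socket
	 * filters must be allowed, while out-of-range offsets and registers
	 * that may hold either a ctx or a map value pointer ("different
	 * pointers") must be rejected.
	 */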
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
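	/* skb->cb[] is a 20-byte scratch area (five u32 slots) that programs
	 * may read and write.  The tests below cover byte/half/word/double
	 * word accesses, alignment, the boundaries of the area, and the fact
	 * that cb[] is not accessible at all for some program types.
	 */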
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: byte, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: half",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: half, unaligned",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: half, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
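	/* PTR_TO_STACK tests: a pointer derived from R10 may be used for
	 * stores and loads as long as the resulting offset stays inside the
	 * stack frame and the access is properly aligned.
	 */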
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
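	/* "unpriv:" tests cover the extra restrictions applied when the
	 * loader lacks CAP_SYS_ADMIN: pointer arithmetic, pointer
	 * comparisons, leaking kernel addresses and certain helpers are
	 * rejected for unprivileged programs, while the same programs load
	 * fine as root (compare result vs. result_unpriv below).
	 */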
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001438 {
1439 "unpriv: return pointer",
1440 .insns = {
1441 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1442 BPF_EXIT_INSN(),
1443 },
1444 .result = ACCEPT,
1445 .result_unpriv = REJECT,
1446 .errstr_unpriv = "R0 leaks addr",
1447 },
1448 {
1449 "unpriv: add const to pointer",
1450 .insns = {
1451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1452 BPF_MOV64_IMM(BPF_REG_0, 0),
1453 BPF_EXIT_INSN(),
1454 },
1455 .result = ACCEPT,
1456 .result_unpriv = REJECT,
1457 .errstr_unpriv = "R1 pointer arithmetic",
1458 },
1459 {
1460 "unpriv: add pointer to pointer",
1461 .insns = {
1462 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1463 BPF_MOV64_IMM(BPF_REG_0, 0),
1464 BPF_EXIT_INSN(),
1465 },
1466 .result = ACCEPT,
1467 .result_unpriv = REJECT,
1468 .errstr_unpriv = "R1 pointer arithmetic",
1469 },
1470 {
1471 "unpriv: neg pointer",
1472 .insns = {
1473 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1474 BPF_MOV64_IMM(BPF_REG_0, 0),
1475 BPF_EXIT_INSN(),
1476 },
1477 .result = ACCEPT,
1478 .result_unpriv = REJECT,
1479 .errstr_unpriv = "R1 pointer arithmetic",
1480 },
1481 {
1482 "unpriv: cmp pointer with const",
1483 .insns = {
1484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1485 BPF_MOV64_IMM(BPF_REG_0, 0),
1486 BPF_EXIT_INSN(),
1487 },
1488 .result = ACCEPT,
1489 .result_unpriv = REJECT,
1490 .errstr_unpriv = "R1 pointer comparison",
1491 },
1492 {
1493 "unpriv: cmp pointer with pointer",
1494 .insns = {
1495 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1496 BPF_MOV64_IMM(BPF_REG_0, 0),
1497 BPF_EXIT_INSN(),
1498 },
1499 .result = ACCEPT,
1500 .result_unpriv = REJECT,
1501 .errstr_unpriv = "R10 pointer comparison",
1502 },
1503 {
1504 "unpriv: check that printk is disallowed",
1505 .insns = {
1506 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1509 BPF_MOV64_IMM(BPF_REG_2, 8),
1510 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1512 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001513 BPF_MOV64_IMM(BPF_REG_0, 0),
1514 BPF_EXIT_INSN(),
1515 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001516 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001517 .result_unpriv = REJECT,
1518 .result = ACCEPT,
1519 },
1520 {
1521 "unpriv: pass pointer to helper function",
1522 .insns = {
1523 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1524 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1526 BPF_LD_MAP_FD(BPF_REG_1, 0),
1527 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1528 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1530 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001531 BPF_MOV64_IMM(BPF_REG_0, 0),
1532 BPF_EXIT_INSN(),
1533 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001534 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001535 .errstr_unpriv = "R4 leaks addr",
1536 .result_unpriv = REJECT,
1537 .result = ACCEPT,
1538 },
1539 {
1540 "unpriv: indirectly pass pointer on stack to helper function",
1541 .insns = {
1542 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1543 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1545 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1547 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001548 BPF_MOV64_IMM(BPF_REG_0, 0),
1549 BPF_EXIT_INSN(),
1550 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001551 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001552 .errstr = "invalid indirect read from stack off -8+0 size 8",
1553 .result = REJECT,
1554 },
1555 {
1556 "unpriv: mangle pointer on stack 1",
1557 .insns = {
1558 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1559 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1560 BPF_MOV64_IMM(BPF_REG_0, 0),
1561 BPF_EXIT_INSN(),
1562 },
1563 .errstr_unpriv = "attempt to corrupt spilled",
1564 .result_unpriv = REJECT,
1565 .result = ACCEPT,
1566 },
1567 {
1568 "unpriv: mangle pointer on stack 2",
1569 .insns = {
1570 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1571 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1572 BPF_MOV64_IMM(BPF_REG_0, 0),
1573 BPF_EXIT_INSN(),
1574 },
1575 .errstr_unpriv = "attempt to corrupt spilled",
1576 .result_unpriv = REJECT,
1577 .result = ACCEPT,
1578 },
1579 {
1580 "unpriv: read pointer from stack in small chunks",
1581 .insns = {
1582 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1583 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1584 BPF_MOV64_IMM(BPF_REG_0, 0),
1585 BPF_EXIT_INSN(),
1586 },
1587 .errstr = "invalid size",
1588 .result = REJECT,
1589 },
1590 {
1591 "unpriv: write pointer into ctx",
1592 .insns = {
1593 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1594 BPF_MOV64_IMM(BPF_REG_0, 0),
1595 BPF_EXIT_INSN(),
1596 },
1597 .errstr_unpriv = "R1 leaks addr",
1598 .result_unpriv = REJECT,
1599 .errstr = "invalid bpf_context access",
1600 .result = REJECT,
1601 },
1602 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001603 "unpriv: spill/fill of ctx",
1604 .insns = {
1605 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1607 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1608 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1609 BPF_MOV64_IMM(BPF_REG_0, 0),
1610 BPF_EXIT_INSN(),
1611 },
1612 .result = ACCEPT,
1613 },
1614 {
1615 "unpriv: spill/fill of ctx 2",
1616 .insns = {
1617 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1619 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1620 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001621 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1622 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001623 BPF_EXIT_INSN(),
1624 },
1625 .result = ACCEPT,
1626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1627 },
1628 {
1629 "unpriv: spill/fill of ctx 3",
1630 .insns = {
1631 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1633 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1634 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1635 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1637 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001638 BPF_EXIT_INSN(),
1639 },
1640 .result = REJECT,
1641 .errstr = "R1 type=fp expected=ctx",
1642 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1643 },
1644 {
1645 "unpriv: spill/fill of ctx 4",
1646 .insns = {
1647 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1649 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1650 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001651 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1652 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001653 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1655 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001656 BPF_EXIT_INSN(),
1657 },
1658 .result = REJECT,
1659 .errstr = "R1 type=inv expected=ctx",
1660 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1661 },
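	/* Depending on a runtime branch, the same stack slot ends up holding
	 * either a stack pointer or the ctx pointer, so a single store/load
	 * instruction would be executed with two different pointer types; the
	 * verifier must reject this ("same insn cannot be used with different
	 * pointers").
	 */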
1662 {
1663 "unpriv: spill/fill of different pointers stx",
1664 .insns = {
1665 BPF_MOV64_IMM(BPF_REG_3, 42),
1666 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1669 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1671 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1672 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1673 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1674 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1675 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1676 offsetof(struct __sk_buff, mark)),
1677 BPF_MOV64_IMM(BPF_REG_0, 0),
1678 BPF_EXIT_INSN(),
1679 },
1680 .result = REJECT,
1681 .errstr = "same insn cannot be used with different pointers",
1682 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1683 },
1684 {
1685 "unpriv: spill/fill of different pointers ldx",
1686 .insns = {
1687 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1692 -(__s32)offsetof(struct bpf_perf_event_data,
1693 sample_period) - 8),
1694 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1695 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1696 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1697 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1698 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1699 offsetof(struct bpf_perf_event_data,
1700 sample_period)),
1701 BPF_MOV64_IMM(BPF_REG_0, 0),
1702 BPF_EXIT_INSN(),
1703 },
1704 .result = REJECT,
1705 .errstr = "same insn cannot be used with different pointers",
1706 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1707 },
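	/* The following unpriv tests check that kernel pointers cannot leak
	 * from a program loaded without privileges: storing a pointer into a
	 * map value, passing one to tail_call, comparing a map pointer against
	 * a constant, or copying only the lower 32 bits of a pointer are all
	 * unpriv-only rejections; privileged loads accept them.
	 */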
1708 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001709 "unpriv: write pointer into map elem value",
1710 .insns = {
1711 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1714 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1716 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1718 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1719 BPF_EXIT_INSN(),
1720 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001721 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001722 .errstr_unpriv = "R0 leaks addr",
1723 .result_unpriv = REJECT,
1724 .result = ACCEPT,
1725 },
1726 {
1727 "unpriv: partial copy of pointer",
1728 .insns = {
1729 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1730 BPF_MOV64_IMM(BPF_REG_0, 0),
1731 BPF_EXIT_INSN(),
1732 },
1733 .errstr_unpriv = "R10 partial copy",
1734 .result_unpriv = REJECT,
1735 .result = ACCEPT,
1736 },
1737 {
1738 "unpriv: pass pointer to tail_call",
1739 .insns = {
1740 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1741 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1743 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001744 BPF_MOV64_IMM(BPF_REG_0, 0),
1745 BPF_EXIT_INSN(),
1746 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001747 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001748 .errstr_unpriv = "R3 leaks addr into helper",
1749 .result_unpriv = REJECT,
1750 .result = ACCEPT,
1751 },
1752 {
1753 "unpriv: cmp map pointer with zero",
1754 .insns = {
1755 BPF_MOV64_IMM(BPF_REG_1, 0),
1756 BPF_LD_MAP_FD(BPF_REG_1, 0),
1757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1758 BPF_MOV64_IMM(BPF_REG_0, 0),
1759 BPF_EXIT_INSN(),
1760 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001761 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001762 .errstr_unpriv = "R1 pointer comparison",
1763 .result_unpriv = REJECT,
1764 .result = ACCEPT,
1765 },
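	/* Frame pointer tests: writing into R10 directly, or by filling it
	 * from the stack, is rejected for everyone ("frame pointer is read
	 * only").  Comparing R10 or a derived stack pointer against a
	 * constant, or doing arithmetic on such a pointer, is only an
	 * unprivileged rejection.
	 */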
1766 {
1767 "unpriv: write into frame pointer",
1768 .insns = {
1769 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1770 BPF_MOV64_IMM(BPF_REG_0, 0),
1771 BPF_EXIT_INSN(),
1772 },
1773 .errstr = "frame pointer is read only",
1774 .result = REJECT,
1775 },
1776 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001777 "unpriv: spill/fill frame pointer",
1778 .insns = {
1779 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1781 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1782 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1783 BPF_MOV64_IMM(BPF_REG_0, 0),
1784 BPF_EXIT_INSN(),
1785 },
1786 .errstr = "frame pointer is read only",
1787 .result = REJECT,
1788 },
1789 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001790 "unpriv: cmp of frame pointer",
1791 .insns = {
1792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1793 BPF_MOV64_IMM(BPF_REG_0, 0),
1794 BPF_EXIT_INSN(),
1795 },
1796 .errstr_unpriv = "R10 pointer comparison",
1797 .result_unpriv = REJECT,
1798 .result = ACCEPT,
1799 },
1800 {
1801 "unpriv: cmp of stack pointer",
1802 .insns = {
1803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1806 BPF_MOV64_IMM(BPF_REG_0, 0),
1807 BPF_EXIT_INSN(),
1808 },
1809 .errstr_unpriv = "R2 pointer comparison",
1810 .result_unpriv = REJECT,
1811 .result = ACCEPT,
1812 },
1813 {
1814 "unpriv: obfuscate stack pointer",
1815 .insns = {
1816 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1819 BPF_MOV64_IMM(BPF_REG_0, 0),
1820 BPF_EXIT_INSN(),
1821 },
1822 .errstr_unpriv = "R2 pointer arithmetic",
1823 .result_unpriv = REJECT,
1824 .result = ACCEPT,
1825 },
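	/* raw_stack tests: bpf_skb_load_bytes() writes into a stack buffer,
	 * which does not have to be initialized beforehand, but the buffer
	 * must lie entirely inside the stack and the length must be positive.
	 * Roughly what the accepted sequences encode, as an illustration only
	 * (assuming the usual bpf_skb_load_bytes(skb, off, to, len) helper
	 * signature):
	 *
	 *	char buf[8];	(uninitialized is fine here)
	 *	bpf_skb_load_bytes(skb, 4, buf, sizeof(buf));
	 *	return *(u64 *)buf;
	 *
	 * The first test omits the helper call entirely, so reading the
	 * never-written slot is an invalid stack read.
	 */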
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001826 {
1827 "raw_stack: no skb_load_bytes",
1828 .insns = {
1829 BPF_MOV64_IMM(BPF_REG_2, 4),
1830 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1832 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1833 BPF_MOV64_IMM(BPF_REG_4, 8),
1834 /* Call to skb_load_bytes() omitted. */
1835 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1836 BPF_EXIT_INSN(),
1837 },
1838 .result = REJECT,
1839 .errstr = "invalid read from stack off -8+0 size 8",
1840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1841 },
1842 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001843 "raw_stack: skb_load_bytes, negative len",
1844 .insns = {
1845 BPF_MOV64_IMM(BPF_REG_2, 4),
1846 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1848 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1849 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1851 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001852 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1853 BPF_EXIT_INSN(),
1854 },
1855 .result = REJECT,
1856 .errstr = "invalid stack type R3",
1857 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1858 },
1859 {
1860 "raw_stack: skb_load_bytes, negative len 2",
1861 .insns = {
1862 BPF_MOV64_IMM(BPF_REG_2, 4),
1863 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1865 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1866 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001867 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1868 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001869 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1870 BPF_EXIT_INSN(),
1871 },
1872 .result = REJECT,
1873 .errstr = "invalid stack type R3",
1874 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1875 },
1876 {
1877 "raw_stack: skb_load_bytes, zero len",
1878 .insns = {
1879 BPF_MOV64_IMM(BPF_REG_2, 4),
1880 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1882 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1883 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1885 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02001886 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1887 BPF_EXIT_INSN(),
1888 },
1889 .result = REJECT,
1890 .errstr = "invalid stack type R3",
1891 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1892 },
1893 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001894 "raw_stack: skb_load_bytes, no init",
1895 .insns = {
1896 BPF_MOV64_IMM(BPF_REG_2, 4),
1897 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1899 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1900 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1902 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001903 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1904 BPF_EXIT_INSN(),
1905 },
1906 .result = ACCEPT,
1907 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1908 },
1909 {
1910 "raw_stack: skb_load_bytes, init",
1911 .insns = {
1912 BPF_MOV64_IMM(BPF_REG_2, 4),
1913 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1915 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1916 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1917 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1919 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001920 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1921 BPF_EXIT_INSN(),
1922 },
1923 .result = ACCEPT,
1924 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1925 },
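	/* Stack slots that merely sit next to the raw buffer keep their
	 * spilled pointers across the helper call and can be filled and
	 * dereferenced afterwards.  Any slot the buffer overlaps is clobbered
	 * to unknown data, and dereferencing it later must be rejected
	 * ("invalid mem access 'inv'").
	 */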
1926 {
1927 "raw_stack: skb_load_bytes, spilled regs around bounds",
1928 .insns = {
1929 BPF_MOV64_IMM(BPF_REG_2, 4),
1930 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001932 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1933 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001934 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1935 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001936 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1937 BPF_FUNC_skb_load_bytes),
1938 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1939 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001940 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1941 offsetof(struct __sk_buff, mark)),
1942 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1943 offsetof(struct __sk_buff, priority)),
1944 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1945 BPF_EXIT_INSN(),
1946 },
1947 .result = ACCEPT,
1948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1949 },
1950 {
1951 "raw_stack: skb_load_bytes, spilled regs corruption",
1952 .insns = {
1953 BPF_MOV64_IMM(BPF_REG_2, 4),
1954 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001956 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001957 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1958 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1960 BPF_FUNC_skb_load_bytes),
1961 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001962 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1963 offsetof(struct __sk_buff, mark)),
1964 BPF_EXIT_INSN(),
1965 },
1966 .result = REJECT,
1967 .errstr = "R0 invalid mem access 'inv'",
1968 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1969 },
1970 {
1971 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1972 .insns = {
1973 BPF_MOV64_IMM(BPF_REG_2, 4),
1974 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001976 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1977 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1978 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001979 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1980 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1982 BPF_FUNC_skb_load_bytes),
1983 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1984 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1985 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02001986 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1987 offsetof(struct __sk_buff, mark)),
1988 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1989 offsetof(struct __sk_buff, priority)),
1990 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1991 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
1992 offsetof(struct __sk_buff, pkt_type)),
1993 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1994 BPF_EXIT_INSN(),
1995 },
1996 .result = REJECT,
1997 .errstr = "R3 invalid mem access 'inv'",
1998 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1999 },
2000 {
2001 "raw_stack: skb_load_bytes, spilled regs + data",
2002 .insns = {
2003 BPF_MOV64_IMM(BPF_REG_2, 4),
2004 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002006 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2007 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2008 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002009 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2010 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2012 BPF_FUNC_skb_load_bytes),
2013 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2014 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2015 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002016 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2017 offsetof(struct __sk_buff, mark)),
2018 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2019 offsetof(struct __sk_buff, priority)),
2020 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2021 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2022 BPF_EXIT_INSN(),
2023 },
2024 .result = ACCEPT,
2025 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2026 },
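	/* "invalid access" sub-group: the buffer handed to the helper in R3
	 * has to stay within the 512-byte stack and use a sane length; offsets
	 * below -512, offsets that would run past the top of the stack, and
	 * negative, zero or huge access sizes are rejected, with the errstr
	 * spelling out the offending off/access_size pair.  The "large access"
	 * test is the maximal case that is still accepted (off=-512, len=512).
	 */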
2027 {
2028 "raw_stack: skb_load_bytes, invalid access 1",
2029 .insns = {
2030 BPF_MOV64_IMM(BPF_REG_2, 4),
2031 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2033 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2034 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2036 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002037 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2038 BPF_EXIT_INSN(),
2039 },
2040 .result = REJECT,
2041 .errstr = "invalid stack type R3 off=-513 access_size=8",
2042 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2043 },
2044 {
2045 "raw_stack: skb_load_bytes, invalid access 2",
2046 .insns = {
2047 BPF_MOV64_IMM(BPF_REG_2, 4),
2048 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2050 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2051 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002052 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2053 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2055 BPF_EXIT_INSN(),
2056 },
2057 .result = REJECT,
2058 .errstr = "invalid stack type R3 off=-1 access_size=8",
2059 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2060 },
2061 {
2062 "raw_stack: skb_load_bytes, invalid access 3",
2063 .insns = {
2064 BPF_MOV64_IMM(BPF_REG_2, 4),
2065 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2067 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2068 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2070 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002071 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2072 BPF_EXIT_INSN(),
2073 },
2074 .result = REJECT,
2075 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2077 },
2078 {
2079 "raw_stack: skb_load_bytes, invalid access 4",
2080 .insns = {
2081 BPF_MOV64_IMM(BPF_REG_2, 4),
2082 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2084 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2085 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2087 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002088 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2089 BPF_EXIT_INSN(),
2090 },
2091 .result = REJECT,
2092 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2093 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2094 },
2095 {
2096 "raw_stack: skb_load_bytes, invalid access 5",
2097 .insns = {
2098 BPF_MOV64_IMM(BPF_REG_2, 4),
2099 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2101 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2102 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2104 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002105 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2106 BPF_EXIT_INSN(),
2107 },
2108 .result = REJECT,
2109 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2111 },
2112 {
2113 "raw_stack: skb_load_bytes, invalid access 6",
2114 .insns = {
2115 BPF_MOV64_IMM(BPF_REG_2, 4),
2116 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2118 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2119 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002120 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2121 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002122 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2123 BPF_EXIT_INSN(),
2124 },
2125 .result = REJECT,
2126 .errstr = "invalid stack type R3 off=-512 access_size=0",
2127 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2128 },
2129 {
2130 "raw_stack: skb_load_bytes, large access",
2131 .insns = {
2132 BPF_MOV64_IMM(BPF_REG_2, 4),
2133 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2135 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2136 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2138 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002139 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2140 BPF_EXIT_INSN(),
2141 },
2142 .result = ACCEPT,
2143 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2144 },
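	/* Direct packet access tests.  The raw instructions encode the usual C
	 * pattern (illustration only):
	 *
	 *	void *data = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	return *(u8 *)data;
	 *
	 * Reads and writes after such a check are accepted for SCHED_CLS
	 * programs; socket filters may not touch data/data_end at all (test3).
	 */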
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002145 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002146 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002147 .insns = {
2148 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2149 offsetof(struct __sk_buff, data)),
2150 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2151 offsetof(struct __sk_buff, data_end)),
2152 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2154 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2155 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2156 BPF_MOV64_IMM(BPF_REG_0, 0),
2157 BPF_EXIT_INSN(),
2158 },
2159 .result = ACCEPT,
2160 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2161 },
2162 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002163 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002164 .insns = {
2165 BPF_MOV64_IMM(BPF_REG_0, 1),
2166 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2167 offsetof(struct __sk_buff, data_end)),
2168 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2169 offsetof(struct __sk_buff, data)),
2170 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2171 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2172 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2173 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2174 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2175 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2176 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2177 offsetof(struct __sk_buff, data)),
2178 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2180 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2181 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2182 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2185 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2186 offsetof(struct __sk_buff, data_end)),
2187 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2188 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2189 BPF_MOV64_IMM(BPF_REG_0, 0),
2190 BPF_EXIT_INSN(),
2191 },
2192 .result = ACCEPT,
2193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2194 },
2195 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002196 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002197 .insns = {
2198 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2199 offsetof(struct __sk_buff, data)),
2200 BPF_MOV64_IMM(BPF_REG_0, 0),
2201 BPF_EXIT_INSN(),
2202 },
2203 .errstr = "invalid bpf_context access off=76",
2204 .result = REJECT,
2205 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2206 },
2207 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002208 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002209 .insns = {
2210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2211 offsetof(struct __sk_buff, data)),
2212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2213 offsetof(struct __sk_buff, data_end)),
2214 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2216 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2217 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2218 BPF_MOV64_IMM(BPF_REG_0, 0),
2219 BPF_EXIT_INSN(),
2220 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002221 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002222 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2223 },
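	/* Tests 5-10: the range check may be written either as
	 * "pkt_end >= reg" or as "reg > pkt_end".  Only the branch on which
	 * the access was proven in range may touch the packet; accessing it on
	 * the other branch, or on both, is rejected.
	 */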
Aaron Yue1633ac02016-08-11 18:17:17 -07002224 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002225 "direct packet access: test5 (pkt_end >= reg, good access)",
2226 .insns = {
2227 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2228 offsetof(struct __sk_buff, data)),
2229 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2230 offsetof(struct __sk_buff, data_end)),
2231 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2233 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2234 BPF_MOV64_IMM(BPF_REG_0, 1),
2235 BPF_EXIT_INSN(),
2236 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2237 BPF_MOV64_IMM(BPF_REG_0, 0),
2238 BPF_EXIT_INSN(),
2239 },
2240 .result = ACCEPT,
2241 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2242 },
2243 {
2244 "direct packet access: test6 (pkt_end >= reg, bad access)",
2245 .insns = {
2246 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2247 offsetof(struct __sk_buff, data)),
2248 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2249 offsetof(struct __sk_buff, data_end)),
2250 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2252 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2253 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2254 BPF_MOV64_IMM(BPF_REG_0, 1),
2255 BPF_EXIT_INSN(),
2256 BPF_MOV64_IMM(BPF_REG_0, 0),
2257 BPF_EXIT_INSN(),
2258 },
2259 .errstr = "invalid access to packet",
2260 .result = REJECT,
2261 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2262 },
2263 {
2264 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2265 .insns = {
2266 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2267 offsetof(struct __sk_buff, data)),
2268 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2269 offsetof(struct __sk_buff, data_end)),
2270 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2272 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2273 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2274 BPF_MOV64_IMM(BPF_REG_0, 1),
2275 BPF_EXIT_INSN(),
2276 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2277 BPF_MOV64_IMM(BPF_REG_0, 0),
2278 BPF_EXIT_INSN(),
2279 },
2280 .errstr = "invalid access to packet",
2281 .result = REJECT,
2282 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2283 },
2284 {
2285 "direct packet access: test8 (double test, variant 1)",
2286 .insns = {
2287 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2288 offsetof(struct __sk_buff, data)),
2289 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2290 offsetof(struct __sk_buff, data_end)),
2291 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2293 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2294 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2295 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2296 BPF_MOV64_IMM(BPF_REG_0, 1),
2297 BPF_EXIT_INSN(),
2298 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2299 BPF_MOV64_IMM(BPF_REG_0, 0),
2300 BPF_EXIT_INSN(),
2301 },
2302 .result = ACCEPT,
2303 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2304 },
2305 {
2306 "direct packet access: test9 (double test, variant 2)",
2307 .insns = {
2308 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2309 offsetof(struct __sk_buff, data)),
2310 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2311 offsetof(struct __sk_buff, data_end)),
2312 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2314 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2315 BPF_MOV64_IMM(BPF_REG_0, 1),
2316 BPF_EXIT_INSN(),
2317 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2318 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2319 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2320 BPF_MOV64_IMM(BPF_REG_0, 0),
2321 BPF_EXIT_INSN(),
2322 },
2323 .result = ACCEPT,
2324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2325 },
2326 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002327 "direct packet access: test10 (write invalid)",
2328 .insns = {
2329 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2330 offsetof(struct __sk_buff, data)),
2331 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2332 offsetof(struct __sk_buff, data_end)),
2333 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2335 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2336 BPF_MOV64_IMM(BPF_REG_0, 0),
2337 BPF_EXIT_INSN(),
2338 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2339 BPF_MOV64_IMM(BPF_REG_0, 0),
2340 BPF_EXIT_INSN(),
2341 },
2342 .errstr = "invalid access to packet",
2343 .result = REJECT,
2344 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2345 },
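	/* Tests 11-15: the verifier tracks a variable packet offset through
	 * right shifts, AND masking and branches that assign different
	 * constants, as long as the result stays inside the verified window.
	 * Test 15 additionally checks that xadd on a spilled packet pointer
	 * turns it into unknown data.
	 */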
2346 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002347 "direct packet access: test11 (shift, good access)",
2348 .insns = {
2349 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2350 offsetof(struct __sk_buff, data)),
2351 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2352 offsetof(struct __sk_buff, data_end)),
2353 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2355 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2356 BPF_MOV64_IMM(BPF_REG_3, 144),
2357 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2359 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2360 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2361 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2362 BPF_MOV64_IMM(BPF_REG_0, 1),
2363 BPF_EXIT_INSN(),
2364 BPF_MOV64_IMM(BPF_REG_0, 0),
2365 BPF_EXIT_INSN(),
2366 },
2367 .result = ACCEPT,
2368 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2369 },
2370 {
2371 "direct packet access: test12 (and, good access)",
2372 .insns = {
2373 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2374 offsetof(struct __sk_buff, data)),
2375 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2376 offsetof(struct __sk_buff, data_end)),
2377 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2379 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2380 BPF_MOV64_IMM(BPF_REG_3, 144),
2381 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2383 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2384 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2385 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2386 BPF_MOV64_IMM(BPF_REG_0, 1),
2387 BPF_EXIT_INSN(),
2388 BPF_MOV64_IMM(BPF_REG_0, 0),
2389 BPF_EXIT_INSN(),
2390 },
2391 .result = ACCEPT,
2392 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2393 },
2394 {
2395 "direct packet access: test13 (branches, good access)",
2396 .insns = {
2397 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2398 offsetof(struct __sk_buff, data)),
2399 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2400 offsetof(struct __sk_buff, data_end)),
2401 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2403 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2404 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2405 offsetof(struct __sk_buff, mark)),
2406 BPF_MOV64_IMM(BPF_REG_4, 1),
2407 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2408 BPF_MOV64_IMM(BPF_REG_3, 14),
2409 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2410 BPF_MOV64_IMM(BPF_REG_3, 24),
2411 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2413 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2414 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2415 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2416 BPF_MOV64_IMM(BPF_REG_0, 1),
2417 BPF_EXIT_INSN(),
2418 BPF_MOV64_IMM(BPF_REG_0, 0),
2419 BPF_EXIT_INSN(),
2420 },
2421 .result = ACCEPT,
2422 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2423 },
2424 {
William Tu63dfef72017-02-04 08:37:29 -08002425 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2426 .insns = {
2427 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2428 offsetof(struct __sk_buff, data)),
2429 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2430 offsetof(struct __sk_buff, data_end)),
2431 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2433 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2434 BPF_MOV64_IMM(BPF_REG_5, 12),
2435 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2436 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2437 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2438 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2439 BPF_MOV64_IMM(BPF_REG_0, 1),
2440 BPF_EXIT_INSN(),
2441 BPF_MOV64_IMM(BPF_REG_0, 0),
2442 BPF_EXIT_INSN(),
2443 },
2444 .result = ACCEPT,
2445 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2446 },
2447 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002448 "direct packet access: test15 (spill with xadd)",
2449 .insns = {
2450 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2451 offsetof(struct __sk_buff, data)),
2452 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2453 offsetof(struct __sk_buff, data_end)),
2454 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2456 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2457 BPF_MOV64_IMM(BPF_REG_5, 4096),
2458 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2460 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2461 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2463 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2464 BPF_MOV64_IMM(BPF_REG_0, 0),
2465 BPF_EXIT_INSN(),
2466 },
2467 .errstr = "R2 invalid mem access 'inv'",
2468 .result = REJECT,
2469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2470 },
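	/* "helper access to packet" (XDP): a packet pointer may be passed as a
	 * map helper's key/value argument only after the data/data_end check
	 * covers the range the helper will read; unchecked or under-checked
	 * packet pointers are rejected.
	 */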
2471 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002472 "helper access to packet: test1, valid packet_ptr range",
2473 .insns = {
2474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2475 offsetof(struct xdp_md, data)),
2476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2477 offsetof(struct xdp_md, data_end)),
2478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2480 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2481 BPF_LD_MAP_FD(BPF_REG_1, 0),
2482 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2483 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2485 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002486 BPF_MOV64_IMM(BPF_REG_0, 0),
2487 BPF_EXIT_INSN(),
2488 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002489 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002490 .result_unpriv = ACCEPT,
2491 .result = ACCEPT,
2492 .prog_type = BPF_PROG_TYPE_XDP,
2493 },
2494 {
2495 "helper access to packet: test2, unchecked packet_ptr",
2496 .insns = {
2497 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2498 offsetof(struct xdp_md, data)),
2499 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2501 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002502 BPF_MOV64_IMM(BPF_REG_0, 0),
2503 BPF_EXIT_INSN(),
2504 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002505 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002506 .result = REJECT,
2507 .errstr = "invalid access to packet",
2508 .prog_type = BPF_PROG_TYPE_XDP,
2509 },
2510 {
2511 "helper access to packet: test3, variable add",
2512 .insns = {
2513 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2514 offsetof(struct xdp_md, data)),
2515 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2516 offsetof(struct xdp_md, data_end)),
2517 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2519 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2520 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2521 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2522 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2523 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2525 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2526 BPF_LD_MAP_FD(BPF_REG_1, 0),
2527 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002528 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2529 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002530 BPF_MOV64_IMM(BPF_REG_0, 0),
2531 BPF_EXIT_INSN(),
2532 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002533 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002534 .result = ACCEPT,
2535 .prog_type = BPF_PROG_TYPE_XDP,
2536 },
2537 {
2538 "helper access to packet: test4, packet_ptr with bad range",
2539 .insns = {
2540 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2541 offsetof(struct xdp_md, data)),
2542 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2543 offsetof(struct xdp_md, data_end)),
2544 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2546 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2547 BPF_MOV64_IMM(BPF_REG_0, 0),
2548 BPF_EXIT_INSN(),
2549 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2551 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002552 BPF_MOV64_IMM(BPF_REG_0, 0),
2553 BPF_EXIT_INSN(),
2554 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002555 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002556 .result = REJECT,
2557 .errstr = "invalid access to packet",
2558 .prog_type = BPF_PROG_TYPE_XDP,
2559 },
2560 {
2561 "helper access to packet: test5, packet_ptr with too short range",
2562 .insns = {
2563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2564 offsetof(struct xdp_md, data)),
2565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2566 offsetof(struct xdp_md, data_end)),
2567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2568 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2570 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2571 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2573 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07002574 BPF_MOV64_IMM(BPF_REG_0, 0),
2575 BPF_EXIT_INSN(),
2576 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002577 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002578 .result = REJECT,
2579 .errstr = "invalid access to packet",
2580 .prog_type = BPF_PROG_TYPE_XDP,
2581 },
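	/* Tests 6-10 repeat the same packet-pointer-to-helper checks for
	 * SCHED_CLS programs using the __sk_buff data/data_end fields.
	 */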
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002582 {
2583 "helper access to packet: test6, cls valid packet_ptr range",
2584 .insns = {
2585 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2586 offsetof(struct __sk_buff, data)),
2587 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2588 offsetof(struct __sk_buff, data_end)),
2589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2591 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2592 BPF_LD_MAP_FD(BPF_REG_1, 0),
2593 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2594 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2596 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002597 BPF_MOV64_IMM(BPF_REG_0, 0),
2598 BPF_EXIT_INSN(),
2599 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002600 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002601 .result = ACCEPT,
2602 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2603 },
2604 {
2605 "helper access to packet: test7, cls unchecked packet_ptr",
2606 .insns = {
2607 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2608 offsetof(struct __sk_buff, data)),
2609 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002610 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2611 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002612 BPF_MOV64_IMM(BPF_REG_0, 0),
2613 BPF_EXIT_INSN(),
2614 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002615 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002616 .result = REJECT,
2617 .errstr = "invalid access to packet",
2618 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2619 },
2620 {
2621 "helper access to packet: test8, cls variable add",
2622 .insns = {
2623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2624 offsetof(struct __sk_buff, data)),
2625 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2626 offsetof(struct __sk_buff, data_end)),
2627 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2629 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2630 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2631 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2632 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2633 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2635 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2636 BPF_LD_MAP_FD(BPF_REG_1, 0),
2637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2639 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002640 BPF_MOV64_IMM(BPF_REG_0, 0),
2641 BPF_EXIT_INSN(),
2642 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002643 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002644 .result = ACCEPT,
2645 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2646 },
2647 {
2648 "helper access to packet: test9, cls packet_ptr with bad range",
2649 .insns = {
2650 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2651 offsetof(struct __sk_buff, data)),
2652 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2653 offsetof(struct __sk_buff, data_end)),
2654 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2656 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2657 BPF_MOV64_IMM(BPF_REG_0, 0),
2658 BPF_EXIT_INSN(),
2659 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2661 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002662 BPF_MOV64_IMM(BPF_REG_0, 0),
2663 BPF_EXIT_INSN(),
2664 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002665 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002666 .result = REJECT,
2667 .errstr = "invalid access to packet",
2668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2669 },
2670 {
2671 "helper access to packet: test10, cls packet_ptr with too short range",
2672 .insns = {
2673 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2674 offsetof(struct __sk_buff, data)),
2675 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2676 offsetof(struct __sk_buff, data_end)),
2677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2678 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2680 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2681 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2683 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002684 BPF_MOV64_IMM(BPF_REG_0, 0),
2685 BPF_EXIT_INSN(),
2686 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002687 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002688 .result = REJECT,
2689 .errstr = "invalid access to packet",
2690 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2691 },
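	/* skb_store_bytes() and skb_load_bytes() take a stack buffer, not a
	 * direct packet pointer, as their memory argument, so handing them a
	 * packet pointer is rejected with "helper access to the packet".
	 */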
2692 {
2693 "helper access to packet: test11, cls unsuitable helper 1",
2694 .insns = {
2695 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2696 offsetof(struct __sk_buff, data)),
2697 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2698 offsetof(struct __sk_buff, data_end)),
2699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2700 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2702 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2703 BPF_MOV64_IMM(BPF_REG_2, 0),
2704 BPF_MOV64_IMM(BPF_REG_4, 42),
2705 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2707 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002708 BPF_MOV64_IMM(BPF_REG_0, 0),
2709 BPF_EXIT_INSN(),
2710 },
2711 .result = REJECT,
2712 .errstr = "helper access to the packet",
2713 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2714 },
2715 {
2716 "helper access to packet: test12, cls unsuitable helper 2",
2717 .insns = {
2718 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2719 offsetof(struct __sk_buff, data)),
2720 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2721 offsetof(struct __sk_buff, data_end)),
2722 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2724 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2725 BPF_MOV64_IMM(BPF_REG_2, 0),
2726 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2728 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002729 BPF_MOV64_IMM(BPF_REG_0, 0),
2730 BPF_EXIT_INSN(),
2731 },
2732 .result = REJECT,
2733 .errstr = "helper access to the packet",
2734 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2735 },
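	/* bpf_csum_diff() may read straight from the packet, so a checked
	 * packet pointer is a valid source buffer (test13).  The remaining
	 * tests vary the source pointer and length: lengths outside the
	 * verified range, zero or negative lengths, arithmetic that
	 * invalidates the pointer, and pkt_end or an unrelated register as the
	 * buffer are all rejected.
	 */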
2736 {
2737 "helper access to packet: test13, cls helper ok",
2738 .insns = {
2739 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2740 offsetof(struct __sk_buff, data)),
2741 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2742 offsetof(struct __sk_buff, data_end)),
2743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2744 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2746 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2747 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2748 BPF_MOV64_IMM(BPF_REG_2, 4),
2749 BPF_MOV64_IMM(BPF_REG_3, 0),
2750 BPF_MOV64_IMM(BPF_REG_4, 0),
2751 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2753 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002754 BPF_MOV64_IMM(BPF_REG_0, 0),
2755 BPF_EXIT_INSN(),
2756 },
2757 .result = ACCEPT,
2758 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2759 },
2760 {
2761 "helper access to packet: test14, cls helper fail sub",
2762 .insns = {
2763 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2764 offsetof(struct __sk_buff, data)),
2765 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2766 offsetof(struct __sk_buff, data_end)),
2767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2770 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2771 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2772 BPF_MOV64_IMM(BPF_REG_2, 4),
2773 BPF_MOV64_IMM(BPF_REG_3, 0),
2774 BPF_MOV64_IMM(BPF_REG_4, 0),
2775 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2777 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002778 BPF_MOV64_IMM(BPF_REG_0, 0),
2779 BPF_EXIT_INSN(),
2780 },
2781 .result = REJECT,
2782 .errstr = "type=inv expected=fp",
2783 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2784 },
2785 {
2786 "helper access to packet: test15, cls helper fail range 1",
2787 .insns = {
2788 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2789 offsetof(struct __sk_buff, data)),
2790 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2791 offsetof(struct __sk_buff, data_end)),
2792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2793 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2795 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2797 BPF_MOV64_IMM(BPF_REG_2, 8),
2798 BPF_MOV64_IMM(BPF_REG_3, 0),
2799 BPF_MOV64_IMM(BPF_REG_4, 0),
2800 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002801 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2802 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002803 BPF_MOV64_IMM(BPF_REG_0, 0),
2804 BPF_EXIT_INSN(),
2805 },
2806 .result = REJECT,
2807 .errstr = "invalid access to packet",
2808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2809 },
2810 {
2811 "helper access to packet: test16, cls helper fail range 2",
2812 .insns = {
2813 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2814 offsetof(struct __sk_buff, data)),
2815 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2816 offsetof(struct __sk_buff, data_end)),
2817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2820 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2821 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2822 BPF_MOV64_IMM(BPF_REG_2, -9),
2823 BPF_MOV64_IMM(BPF_REG_3, 0),
2824 BPF_MOV64_IMM(BPF_REG_4, 0),
2825 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002826 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2827 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002828 BPF_MOV64_IMM(BPF_REG_0, 0),
2829 BPF_EXIT_INSN(),
2830 },
2831 .result = REJECT,
2832 .errstr = "invalid access to packet",
2833 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2834 },
2835 {
2836 "helper access to packet: test17, cls helper fail range 3",
2837 .insns = {
2838 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2839 offsetof(struct __sk_buff, data)),
2840 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2841 offsetof(struct __sk_buff, data_end)),
2842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2845 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2846 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2847 BPF_MOV64_IMM(BPF_REG_2, ~0),
2848 BPF_MOV64_IMM(BPF_REG_3, 0),
2849 BPF_MOV64_IMM(BPF_REG_4, 0),
2850 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2852 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002853 BPF_MOV64_IMM(BPF_REG_0, 0),
2854 BPF_EXIT_INSN(),
2855 },
2856 .result = REJECT,
2857 .errstr = "invalid access to packet",
2858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2859 },
2860 {
2861 "helper access to packet: test18, cls helper fail range zero",
2862 .insns = {
2863 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2864 offsetof(struct __sk_buff, data)),
2865 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2866 offsetof(struct __sk_buff, data_end)),
2867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2868 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2870 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2872 BPF_MOV64_IMM(BPF_REG_2, 0),
2873 BPF_MOV64_IMM(BPF_REG_3, 0),
2874 BPF_MOV64_IMM(BPF_REG_4, 0),
2875 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2877 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002878 BPF_MOV64_IMM(BPF_REG_0, 0),
2879 BPF_EXIT_INSN(),
2880 },
2881 .result = REJECT,
2882 .errstr = "invalid access to packet",
2883 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2884 },
2885 {
2886 "helper access to packet: test19, pkt end as input",
2887 .insns = {
2888 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2889 offsetof(struct __sk_buff, data)),
2890 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2891 offsetof(struct __sk_buff, data_end)),
2892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2895 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2896 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
2897 BPF_MOV64_IMM(BPF_REG_2, 4),
2898 BPF_MOV64_IMM(BPF_REG_3, 0),
2899 BPF_MOV64_IMM(BPF_REG_4, 0),
2900 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2902 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002903 BPF_MOV64_IMM(BPF_REG_0, 0),
2904 BPF_EXIT_INSN(),
2905 },
2906 .result = REJECT,
2907 .errstr = "R1 type=pkt_end expected=fp",
2908 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2909 },
2910 {
2911 "helper access to packet: test20, wrong reg",
2912 .insns = {
2913 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2914 offsetof(struct __sk_buff, data)),
2915 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2916 offsetof(struct __sk_buff, data_end)),
2917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2918 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2920 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2921 BPF_MOV64_IMM(BPF_REG_2, 4),
2922 BPF_MOV64_IMM(BPF_REG_3, 0),
2923 BPF_MOV64_IMM(BPF_REG_4, 0),
2924 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2926 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002927 BPF_MOV64_IMM(BPF_REG_0, 0),
2928 BPF_EXIT_INSN(),
2929 },
2930 .result = REJECT,
2931 .errstr = "invalid access to packet",
2932 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2933 },
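	/* Array map value tests: lookups through fixup_map2 return a value of
	 * struct test_val (value_size=48) with an int array member "foo".
	 * Roughly what the accepted variants encode (illustration only):
	 *
	 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	 *	if (!val)
	 *		return 0;
	 *	unsigned int idx = *(unsigned int *)val;
	 *	if (idx < MAX_ENTRIES)
	 *		val->foo[idx] = 1;
	 *
	 * Privileged programs may do such bounded pointer arithmetic on the
	 * value; unprivileged loads reject any arithmetic on it.
	 */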
Josef Bacik48461132016-09-28 10:54:32 -04002934 {
2935 "valid map access into an array with a constant",
2936 .insns = {
2937 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2940 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2942 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002944 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2945 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002946 BPF_EXIT_INSN(),
2947 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002948 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04002949 .errstr_unpriv = "R0 leaks addr",
2950 .result_unpriv = REJECT,
2951 .result = ACCEPT,
2952 },
2953 {
2954 "valid map access into an array with a register",
2955 .insns = {
2956 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2957 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2959 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002960 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2961 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002962 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2963 BPF_MOV64_IMM(BPF_REG_1, 4),
2964 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2965 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002966 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2967 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002968 BPF_EXIT_INSN(),
2969 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002970 .fixup_map2 = { 3 },
2971 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002972 .result_unpriv = REJECT,
2973 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002974 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04002975 },
2976 {
2977 "valid map access into an array with a variable",
2978 .insns = {
2979 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2980 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2982 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2984 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04002985 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
2986 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2987 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
2988 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2989 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002990 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2991 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04002992 BPF_EXIT_INSN(),
2993 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002994 .fixup_map2 = { 3 },
2995 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04002996 .result_unpriv = REJECT,
2997 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002998 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04002999 },
3000 {
3001 "valid map access into an array with a signed variable",
3002 .insns = {
3003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3006 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3008 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003009 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3010 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3011 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3012 BPF_MOV32_IMM(BPF_REG_1, 0),
3013 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3014 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3015 BPF_MOV32_IMM(BPF_REG_1, 0),
3016 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3017 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003018 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3019 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003020 BPF_EXIT_INSN(),
3021 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003022 .fixup_map2 = { 3 },
3023 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003024 .result_unpriv = REJECT,
3025 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003026 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003027 },
3028 {
3029 "invalid map access into an array with a constant",
3030 .insns = {
3031 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3034 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3036 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3038 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3039 offsetof(struct test_val, foo)),
3040 BPF_EXIT_INSN(),
3041 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003042 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003043 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3044 .result = REJECT,
3045 },
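	/* Without a provable lower bound on the index register (no check at
	 * all, only an upper bound, or a bogus signed comparison), the tracked
	 * minimum value of R0 can be negative or land outside the array, so
	 * these accesses must be rejected.
	 */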
3046 {
3047 "invalid map access into an array with a register",
3048 .insns = {
3049 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3052 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3054 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003055 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3056 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3057 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3058 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003059 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3060 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003061 BPF_EXIT_INSN(),
3062 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003063 .fixup_map2 = { 3 },
3064 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003065 .errstr = "R0 min value is outside of the array range",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003066 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003067 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003068 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003069 },
3070 {
3071 "invalid map access into an array with a variable",
3072 .insns = {
3073 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3076 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3078 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3080 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3081 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3082 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003083 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3084 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003085 BPF_EXIT_INSN(),
3086 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003087 .fixup_map2 = { 3 },
3088 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003089 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003090 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003091 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003092 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003093 },
3094 {
3095 "invalid map access into an array with no floor check",
3096 .insns = {
3097 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3098 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3100 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3102 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3104 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3105 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3106 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3107 BPF_MOV32_IMM(BPF_REG_1, 0),
3108 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3109 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003110 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3111 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003112 BPF_EXIT_INSN(),
3113 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003114 .fixup_map2 = { 3 },
3115 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003116 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003117 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003118 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003119 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003120 },
3121 {
3122 "invalid map access into an array with a invalid max check",
3123 .insns = {
3124 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3127 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3129 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3131 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3132 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3133 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3134 BPF_MOV32_IMM(BPF_REG_1, 0),
3135 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3136 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003137 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3138 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003139 BPF_EXIT_INSN(),
3140 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003141 .fixup_map2 = { 3 },
3142 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003143 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003144 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003145 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003146 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003147 },
3148 {
3149 "invalid map access into an array with a invalid max check",
3150 .insns = {
3151 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3154 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3156 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3158 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3159 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3162 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3164 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3166 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003167 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3168 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003169 BPF_EXIT_INSN(),
3170 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003171 .fixup_map2 = { 3, 11 },
3172 .errstr_unpriv = "R0 pointer arithmetic prohibited",
Josef Bacik48461132016-09-28 10:54:32 -04003173 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003174 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003175 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003176 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003177 },
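	/* The next three tests cover how the verifier tracks copies of a
	 * map_lookup_elem() result: a NULL check on R0 must also validate a
	 * register that holds the same lookup result, copies left in
	 * caller-saved registers become unreadable after another helper
	 * call, and a copy taken around a conditional second lookup is
	 * still usable once it has been NULL checked.
	 */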
Thomas Graf57a09bf2016-10-18 19:51:19 +02003178 {
3179 "multiple registers share map_lookup_elem result",
3180 .insns = {
3181 BPF_MOV64_IMM(BPF_REG_1, 10),
3182 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3185 BPF_LD_MAP_FD(BPF_REG_1, 0),
3186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3187 BPF_FUNC_map_lookup_elem),
3188 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3189 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3190 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3191 BPF_EXIT_INSN(),
3192 },
3193 .fixup_map1 = { 4 },
3194 .result = ACCEPT,
3195 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3196 },
3197 {
3198 "invalid memory access with multiple map_lookup_elem calls",
3199 .insns = {
3200 BPF_MOV64_IMM(BPF_REG_1, 10),
3201 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3202 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3204 BPF_LD_MAP_FD(BPF_REG_1, 0),
3205 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3206 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3208 BPF_FUNC_map_lookup_elem),
3209 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3210 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3213 BPF_FUNC_map_lookup_elem),
3214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3215 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3216 BPF_EXIT_INSN(),
3217 },
3218 .fixup_map1 = { 4 },
3219 .result = REJECT,
3220 .errstr = "R4 !read_ok",
3221 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3222 },
3223 {
3224 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3225 .insns = {
3226 BPF_MOV64_IMM(BPF_REG_1, 10),
3227 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3230 BPF_LD_MAP_FD(BPF_REG_1, 0),
3231 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3232 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3234 BPF_FUNC_map_lookup_elem),
3235 BPF_MOV64_IMM(BPF_REG_2, 10),
3236 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3238 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3240 BPF_FUNC_map_lookup_elem),
3241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3243 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3244 BPF_EXIT_INSN(),
3245 },
3246 .fixup_map1 = { 4 },
3247 .result = ACCEPT,
3248 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3249 },
Josef Bacike9548902016-11-29 12:35:19 -05003250 {
Daniel Borkmanna08dd0d2016-12-15 01:30:06 +01003251 "multiple registers share map_lookup_elem bad reg type",
3252 .insns = {
3253 BPF_MOV64_IMM(BPF_REG_1, 10),
3254 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3255 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3257 BPF_LD_MAP_FD(BPF_REG_1, 0),
3258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3259 BPF_FUNC_map_lookup_elem),
3260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3261 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3262 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3263 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3265 BPF_MOV64_IMM(BPF_REG_1, 1),
3266 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3267 BPF_MOV64_IMM(BPF_REG_1, 2),
3268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3269 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3270 BPF_MOV64_IMM(BPF_REG_1, 3),
3271 BPF_EXIT_INSN(),
3272 },
3273 .fixup_map1 = { 4 },
3274 .result = REJECT,
3275 .errstr = "R3 invalid mem access 'inv'",
3276 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3277 },
3278 {
Josef Bacike9548902016-11-29 12:35:19 -05003279 "invalid map access from else condition",
3280 .insns = {
3281 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3284 BPF_LD_MAP_FD(BPF_REG_1, 0),
3285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3286 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3287 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3288 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3290 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3291 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3292 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3293 BPF_EXIT_INSN(),
3294 },
3295 .fixup_map2 = { 3 },
3296 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3297 .result = REJECT,
3298 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3299 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003300 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05003301 },
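	/* BPF_OR on known constants must leave the destination a known
	 * constant so it can serve as the size argument of
	 * bpf_probe_read(); the result is still bounds checked against the
	 * 48-byte stack buffer (34 | 13 = 47 fits, 34 | 24 = 58 does not).
	 */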
Gianluca Borello3c8397442016-12-03 12:31:33 -08003302 {
3303 "constant register |= constant should keep constant type",
3304 .insns = {
3305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3307 BPF_MOV64_IMM(BPF_REG_2, 34),
3308 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3309 BPF_MOV64_IMM(BPF_REG_3, 0),
3310 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3311 BPF_EXIT_INSN(),
3312 },
3313 .result = ACCEPT,
3314 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3315 },
3316 {
3317 "constant register |= constant should not bypass stack boundary checks",
3318 .insns = {
3319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3321 BPF_MOV64_IMM(BPF_REG_2, 34),
3322 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3323 BPF_MOV64_IMM(BPF_REG_3, 0),
3324 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3325 BPF_EXIT_INSN(),
3326 },
3327 .errstr = "invalid stack type R1 off=-48 access_size=58",
3328 .result = REJECT,
3329 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3330 },
3331 {
3332 "constant register |= constant register should keep constant type",
3333 .insns = {
3334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3336 BPF_MOV64_IMM(BPF_REG_2, 34),
3337 BPF_MOV64_IMM(BPF_REG_4, 13),
3338 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3339 BPF_MOV64_IMM(BPF_REG_3, 0),
3340 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3341 BPF_EXIT_INSN(),
3342 },
3343 .result = ACCEPT,
3344 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3345 },
3346 {
3347 "constant register |= constant register should not bypass stack boundary checks",
3348 .insns = {
3349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3351 BPF_MOV64_IMM(BPF_REG_2, 34),
3352 BPF_MOV64_IMM(BPF_REG_4, 24),
3353 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3354 BPF_MOV64_IMM(BPF_REG_3, 0),
3355 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3356 BPF_EXIT_INSN(),
3357 },
3358 .errstr = "invalid stack type R1 off=-48 access_size=58",
3359 .result = REJECT,
3360 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3361 },
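	/* Direct packet access for LWT programs: writes into the packet
	 * are only allowed for BPF_PROG_TYPE_LWT_XMIT, while reads are
	 * accepted for LWT_IN, LWT_OUT and LWT_XMIT alike.
	 */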
Thomas Graf3f731d82016-12-05 10:30:52 +01003362 {
3363 "invalid direct packet write for LWT_IN",
3364 .insns = {
3365 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3366 offsetof(struct __sk_buff, data)),
3367 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3368 offsetof(struct __sk_buff, data_end)),
3369 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3371 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3372 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3373 BPF_MOV64_IMM(BPF_REG_0, 0),
3374 BPF_EXIT_INSN(),
3375 },
3376 .errstr = "cannot write into packet",
3377 .result = REJECT,
3378 .prog_type = BPF_PROG_TYPE_LWT_IN,
3379 },
3380 {
3381 "invalid direct packet write for LWT_OUT",
3382 .insns = {
3383 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3384 offsetof(struct __sk_buff, data)),
3385 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3386 offsetof(struct __sk_buff, data_end)),
3387 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3389 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3390 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3391 BPF_MOV64_IMM(BPF_REG_0, 0),
3392 BPF_EXIT_INSN(),
3393 },
3394 .errstr = "cannot write into packet",
3395 .result = REJECT,
3396 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3397 },
3398 {
3399 "direct packet write for LWT_XMIT",
3400 .insns = {
3401 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3402 offsetof(struct __sk_buff, data)),
3403 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3404 offsetof(struct __sk_buff, data_end)),
3405 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3407 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3408 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3409 BPF_MOV64_IMM(BPF_REG_0, 0),
3410 BPF_EXIT_INSN(),
3411 },
3412 .result = ACCEPT,
3413 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3414 },
3415 {
3416 "direct packet read for LWT_IN",
3417 .insns = {
3418 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3419 offsetof(struct __sk_buff, data)),
3420 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3421 offsetof(struct __sk_buff, data_end)),
3422 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3424 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3425 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3426 BPF_MOV64_IMM(BPF_REG_0, 0),
3427 BPF_EXIT_INSN(),
3428 },
3429 .result = ACCEPT,
3430 .prog_type = BPF_PROG_TYPE_LWT_IN,
3431 },
3432 {
3433 "direct packet read for LWT_OUT",
3434 .insns = {
3435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3436 offsetof(struct __sk_buff, data)),
3437 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3438 offsetof(struct __sk_buff, data_end)),
3439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3441 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3442 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3443 BPF_MOV64_IMM(BPF_REG_0, 0),
3444 BPF_EXIT_INSN(),
3445 },
3446 .result = ACCEPT,
3447 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3448 },
3449 {
3450 "direct packet read for LWT_XMIT",
3451 .insns = {
3452 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3453 offsetof(struct __sk_buff, data)),
3454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3455 offsetof(struct __sk_buff, data_end)),
3456 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3458 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3459 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3460 BPF_MOV64_IMM(BPF_REG_0, 0),
3461 BPF_EXIT_INSN(),
3462 },
3463 .result = ACCEPT,
3464 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3465 },
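	/* Overlapping bounds checks: after data + 8 has been verified
	 * against data_end, a second check on data + 6 must not confuse
	 * the verifier, and the 2-byte load at offset 6 remains valid.
	 */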
3466 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07003467 "overlapping checks for direct packet access",
3468 .insns = {
3469 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3470 offsetof(struct __sk_buff, data)),
3471 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3472 offsetof(struct __sk_buff, data_end)),
3473 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3475 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3478 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3479 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3480 BPF_MOV64_IMM(BPF_REG_0, 0),
3481 BPF_EXIT_INSN(),
3482 },
3483 .result = ACCEPT,
3484 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3485 },
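	/* skb->tc_classid is a tc-specific field; the following tests
	 * expect the context load to be rejected as invalid bpf_context
	 * access (note they rely on the default program type, no
	 * .prog_type is set).
	 */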
3486 {
Thomas Graf3f731d82016-12-05 10:30:52 +01003487 "invalid access of tc_classid for LWT_IN",
3488 .insns = {
3489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3490 offsetof(struct __sk_buff, tc_classid)),
3491 BPF_EXIT_INSN(),
3492 },
3493 .result = REJECT,
3494 .errstr = "invalid bpf_context access",
3495 },
3496 {
3497 "invalid access of tc_classid for LWT_OUT",
3498 .insns = {
3499 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3500 offsetof(struct __sk_buff, tc_classid)),
3501 BPF_EXIT_INSN(),
3502 },
3503 .result = REJECT,
3504 .errstr = "invalid bpf_context access",
3505 },
3506 {
3507 "invalid access of tc_classid for LWT_XMIT",
3508 .insns = {
3509 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3510 offsetof(struct __sk_buff, tc_classid)),
3511 BPF_EXIT_INSN(),
3512 },
3513 .result = REJECT,
3514 .errstr = "invalid bpf_context access",
3515 },
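	/* Helper access to a map value: the pointer returned by
	 * map_lookup_elem() is passed to bpf_probe_read() together with a
	 * constant size; full and partial ranges are accepted, while zero,
	 * out-of-bound and negative sizes must be rejected against
	 * value_size.
	 */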
Gianluca Borello57225692017-01-09 10:19:47 -08003516 {
3517 "helper access to map: full range",
3518 .insns = {
3519 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3521 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3522 BPF_LD_MAP_FD(BPF_REG_1, 0),
3523 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3526 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3527 BPF_MOV64_IMM(BPF_REG_3, 0),
3528 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3529 BPF_EXIT_INSN(),
3530 },
3531 .fixup_map2 = { 3 },
3532 .result = ACCEPT,
3533 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3534 },
3535 {
3536 "helper access to map: partial range",
3537 .insns = {
3538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3540 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3541 BPF_LD_MAP_FD(BPF_REG_1, 0),
3542 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3543 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3545 BPF_MOV64_IMM(BPF_REG_2, 8),
3546 BPF_MOV64_IMM(BPF_REG_3, 0),
3547 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3548 BPF_EXIT_INSN(),
3549 },
3550 .fixup_map2 = { 3 },
3551 .result = ACCEPT,
3552 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3553 },
3554 {
3555 "helper access to map: empty range",
3556 .insns = {
3557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3559 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3560 BPF_LD_MAP_FD(BPF_REG_1, 0),
3561 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3562 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3563 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3564 BPF_MOV64_IMM(BPF_REG_2, 0),
3565 BPF_MOV64_IMM(BPF_REG_3, 0),
3566 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3567 BPF_EXIT_INSN(),
3568 },
3569 .fixup_map2 = { 3 },
3570 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3571 .result = REJECT,
3572 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3573 },
3574 {
3575 "helper access to map: out-of-bound range",
3576 .insns = {
3577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3579 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3580 BPF_LD_MAP_FD(BPF_REG_1, 0),
3581 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3583 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3584 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3585 BPF_MOV64_IMM(BPF_REG_3, 0),
3586 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3587 BPF_EXIT_INSN(),
3588 },
3589 .fixup_map2 = { 3 },
3590 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3591 .result = REJECT,
3592 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3593 },
3594 {
3595 "helper access to map: negative range",
3596 .insns = {
3597 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3599 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3600 BPF_LD_MAP_FD(BPF_REG_1, 0),
3601 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3604 BPF_MOV64_IMM(BPF_REG_2, -8),
3605 BPF_MOV64_IMM(BPF_REG_3, 0),
3606 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3607 BPF_EXIT_INSN(),
3608 },
3609 .fixup_map2 = { 3 },
3610 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3611 .result = REJECT,
3612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3613 },
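	/* The same checks after the map value pointer has been advanced by
	 * a constant immediate (offsetof(struct test_val, foo)); the
	 * remaining space, not the full value_size, bounds the access.
	 */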
3614 {
3615 "helper access to adjusted map (via const imm): full range",
3616 .insns = {
3617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3619 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3620 BPF_LD_MAP_FD(BPF_REG_1, 0),
3621 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3625 offsetof(struct test_val, foo)),
3626 BPF_MOV64_IMM(BPF_REG_2,
3627 sizeof(struct test_val) -
3628 offsetof(struct test_val, foo)),
3629 BPF_MOV64_IMM(BPF_REG_3, 0),
3630 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3631 BPF_EXIT_INSN(),
3632 },
3633 .fixup_map2 = { 3 },
3634 .result = ACCEPT,
3635 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3636 },
3637 {
3638 "helper access to adjusted map (via const imm): partial range",
3639 .insns = {
3640 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3642 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3643 BPF_LD_MAP_FD(BPF_REG_1, 0),
3644 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3646 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3648 offsetof(struct test_val, foo)),
3649 BPF_MOV64_IMM(BPF_REG_2, 8),
3650 BPF_MOV64_IMM(BPF_REG_3, 0),
3651 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3652 BPF_EXIT_INSN(),
3653 },
3654 .fixup_map2 = { 3 },
3655 .result = ACCEPT,
3656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3657 },
3658 {
3659 "helper access to adjusted map (via const imm): empty range",
3660 .insns = {
3661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3664 BPF_LD_MAP_FD(BPF_REG_1, 0),
3665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3669 offsetof(struct test_val, foo)),
3670 BPF_MOV64_IMM(BPF_REG_2, 0),
3671 BPF_MOV64_IMM(BPF_REG_3, 0),
3672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3673 BPF_EXIT_INSN(),
3674 },
3675 .fixup_map2 = { 3 },
3676 .errstr = "R1 min value is outside of the array range",
3677 .result = REJECT,
3678 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3679 },
3680 {
3681 "helper access to adjusted map (via const imm): out-of-bound range",
3682 .insns = {
3683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3685 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3686 BPF_LD_MAP_FD(BPF_REG_1, 0),
3687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3691 offsetof(struct test_val, foo)),
3692 BPF_MOV64_IMM(BPF_REG_2,
3693 sizeof(struct test_val) -
3694 offsetof(struct test_val, foo) + 8),
3695 BPF_MOV64_IMM(BPF_REG_3, 0),
3696 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3697 BPF_EXIT_INSN(),
3698 },
3699 .fixup_map2 = { 3 },
3700 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3701 .result = REJECT,
3702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3703 },
3704 {
3705 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3706 .insns = {
3707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3709 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3710 BPF_LD_MAP_FD(BPF_REG_1, 0),
3711 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3715 offsetof(struct test_val, foo)),
3716 BPF_MOV64_IMM(BPF_REG_2, -8),
3717 BPF_MOV64_IMM(BPF_REG_3, 0),
3718 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3719 BPF_EXIT_INSN(),
3720 },
3721 .fixup_map2 = { 3 },
3722 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3723 .result = REJECT,
3724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3725 },
3726 {
3727 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3728 .insns = {
3729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3732 BPF_LD_MAP_FD(BPF_REG_1, 0),
3733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3737 offsetof(struct test_val, foo)),
3738 BPF_MOV64_IMM(BPF_REG_2, -1),
3739 BPF_MOV64_IMM(BPF_REG_3, 0),
3740 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3741 BPF_EXIT_INSN(),
3742 },
3743 .fixup_map2 = { 3 },
3744 .errstr = "R1 min value is outside of the array range",
3745 .result = REJECT,
3746 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3747 },
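	/* As above, but the constant offset is added through a register
	 * rather than an immediate.
	 */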
3748 {
3749 "helper access to adjusted map (via const reg): full range",
3750 .insns = {
3751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3753 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3754 BPF_LD_MAP_FD(BPF_REG_1, 0),
3755 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3758 BPF_MOV64_IMM(BPF_REG_3,
3759 offsetof(struct test_val, foo)),
3760 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3761 BPF_MOV64_IMM(BPF_REG_2,
3762 sizeof(struct test_val) -
3763 offsetof(struct test_val, foo)),
3764 BPF_MOV64_IMM(BPF_REG_3, 0),
3765 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3766 BPF_EXIT_INSN(),
3767 },
3768 .fixup_map2 = { 3 },
3769 .result = ACCEPT,
3770 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3771 },
3772 {
3773 "helper access to adjusted map (via const reg): partial range",
3774 .insns = {
3775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3777 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3778 BPF_LD_MAP_FD(BPF_REG_1, 0),
3779 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3781 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3782 BPF_MOV64_IMM(BPF_REG_3,
3783 offsetof(struct test_val, foo)),
3784 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3785 BPF_MOV64_IMM(BPF_REG_2, 8),
3786 BPF_MOV64_IMM(BPF_REG_3, 0),
3787 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3788 BPF_EXIT_INSN(),
3789 },
3790 .fixup_map2 = { 3 },
3791 .result = ACCEPT,
3792 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3793 },
3794 {
3795 "helper access to adjusted map (via const reg): empty range",
3796 .insns = {
3797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3799 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3800 BPF_LD_MAP_FD(BPF_REG_1, 0),
3801 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3804 BPF_MOV64_IMM(BPF_REG_3, 0),
3805 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3806 BPF_MOV64_IMM(BPF_REG_2, 0),
3807 BPF_MOV64_IMM(BPF_REG_3, 0),
3808 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3809 BPF_EXIT_INSN(),
3810 },
3811 .fixup_map2 = { 3 },
3812 .errstr = "R1 min value is outside of the array range",
3813 .result = REJECT,
3814 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3815 },
3816 {
3817 "helper access to adjusted map (via const reg): out-of-bound range",
3818 .insns = {
3819 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3821 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3822 BPF_LD_MAP_FD(BPF_REG_1, 0),
3823 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3825 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3826 BPF_MOV64_IMM(BPF_REG_3,
3827 offsetof(struct test_val, foo)),
3828 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3829 BPF_MOV64_IMM(BPF_REG_2,
3830 sizeof(struct test_val) -
3831 offsetof(struct test_val, foo) + 8),
3832 BPF_MOV64_IMM(BPF_REG_3, 0),
3833 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3834 BPF_EXIT_INSN(),
3835 },
3836 .fixup_map2 = { 3 },
3837 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3838 .result = REJECT,
3839 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3840 },
3841 {
3842 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3843 .insns = {
3844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3846 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3847 BPF_LD_MAP_FD(BPF_REG_1, 0),
3848 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3850 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3851 BPF_MOV64_IMM(BPF_REG_3,
3852 offsetof(struct test_val, foo)),
3853 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3854 BPF_MOV64_IMM(BPF_REG_2, -8),
3855 BPF_MOV64_IMM(BPF_REG_3, 0),
3856 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3857 BPF_EXIT_INSN(),
3858 },
3859 .fixup_map2 = { 3 },
3860 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3861 .result = REJECT,
3862 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3863 },
3864 {
3865 "helper access to adjusted map (via const reg): negative range (< adjustment)",
3866 .insns = {
3867 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3869 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3870 BPF_LD_MAP_FD(BPF_REG_1, 0),
3871 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3872 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3873 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3874 BPF_MOV64_IMM(BPF_REG_3,
3875 offsetof(struct test_val, foo)),
3876 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3877 BPF_MOV64_IMM(BPF_REG_2, -1),
3878 BPF_MOV64_IMM(BPF_REG_3, 0),
3879 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3880 BPF_EXIT_INSN(),
3881 },
3882 .fixup_map2 = { 3 },
3883 .errstr = "R1 min value is outside of the array range",
3884 .result = REJECT,
3885 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3886 },
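	/* Here the offset added to the map value is loaded from map data
	 * itself and is only constrained by conditional jumps; a missing
	 * bound on the loaded index or a size that exceeds the remaining
	 * space must be rejected.
	 */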
3887 {
3888 "helper access to adjusted map (via variable): full range",
3889 .insns = {
3890 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3892 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3893 BPF_LD_MAP_FD(BPF_REG_1, 0),
3894 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3895 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3896 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3898 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3899 offsetof(struct test_val, foo), 4),
3900 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3901 BPF_MOV64_IMM(BPF_REG_2,
3902 sizeof(struct test_val) -
3903 offsetof(struct test_val, foo)),
3904 BPF_MOV64_IMM(BPF_REG_3, 0),
3905 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3906 BPF_EXIT_INSN(),
3907 },
3908 .fixup_map2 = { 3 },
3909 .result = ACCEPT,
3910 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3911 },
3912 {
3913 "helper access to adjusted map (via variable): partial range",
3914 .insns = {
3915 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3917 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3918 BPF_LD_MAP_FD(BPF_REG_1, 0),
3919 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3920 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3922 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3923 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3924 offsetof(struct test_val, foo), 4),
3925 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3926 BPF_MOV64_IMM(BPF_REG_2, 8),
3927 BPF_MOV64_IMM(BPF_REG_3, 0),
3928 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3929 BPF_EXIT_INSN(),
3930 },
3931 .fixup_map2 = { 3 },
3932 .result = ACCEPT,
3933 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3934 },
3935 {
3936 "helper access to adjusted map (via variable): empty range",
3937 .insns = {
3938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3940 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3941 BPF_LD_MAP_FD(BPF_REG_1, 0),
3942 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3946 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3947 offsetof(struct test_val, foo), 4),
3948 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3949 BPF_MOV64_IMM(BPF_REG_2, 0),
3950 BPF_MOV64_IMM(BPF_REG_3, 0),
3951 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3952 BPF_EXIT_INSN(),
3953 },
3954 .fixup_map2 = { 3 },
3955 .errstr = "R1 min value is outside of the array range",
3956 .result = REJECT,
3957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3958 },
3959 {
3960 "helper access to adjusted map (via variable): no max check",
3961 .insns = {
3962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3964 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3965 BPF_LD_MAP_FD(BPF_REG_1, 0),
3966 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3967 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3969 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3970 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3971 BPF_MOV64_IMM(BPF_REG_2, 0),
3972 BPF_MOV64_IMM(BPF_REG_3, 0),
3973 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3974 BPF_EXIT_INSN(),
3975 },
3976 .fixup_map2 = { 3 },
3977 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
3978 .result = REJECT,
3979 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3980 },
3981 {
3982 "helper access to adjusted map (via variable): wrong max check",
3983 .insns = {
3984 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3986 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3987 BPF_LD_MAP_FD(BPF_REG_1, 0),
3988 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3990 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3991 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3992 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3993 offsetof(struct test_val, foo), 4),
3994 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3995 BPF_MOV64_IMM(BPF_REG_2,
3996 sizeof(struct test_val) -
3997 offsetof(struct test_val, foo) + 1),
3998 BPF_MOV64_IMM(BPF_REG_3, 0),
3999 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4000 BPF_EXIT_INSN(),
4001 },
4002 .fixup_map2 = { 3 },
4003 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4004 .result = REJECT,
4005 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4006 },
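	/* Spilling a map value pointer (or map_value_or_null) to the stack
	 * with a 64-bit store must preserve its type so it can be reloaded
	 * and dereferenced; a register clobbered by the helper call cannot
	 * be stored into the value; and the misaligned accesses below are
	 * only expected to pass on architectures with efficient unaligned
	 * access (F_NEEDS_EFFICIENT_UNALIGNED_ACCESS).
	 */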
Gianluca Borellof0318d02017-01-09 10:19:48 -08004007 {
4008 "map element value is preserved across register spilling",
4009 .insns = {
4010 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4012 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4013 BPF_LD_MAP_FD(BPF_REG_1, 0),
4014 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4015 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4019 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4020 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4021 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4022 BPF_EXIT_INSN(),
4023 },
4024 .fixup_map2 = { 3 },
4025 .errstr_unpriv = "R0 leaks addr",
4026 .result = ACCEPT,
4027 .result_unpriv = REJECT,
4028 },
4029 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004030 "map element value or null is marked on register spilling",
4031 .insns = {
4032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4034 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4035 BPF_LD_MAP_FD(BPF_REG_1, 0),
4036 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4040 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4041 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4042 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4043 BPF_EXIT_INSN(),
4044 },
4045 .fixup_map2 = { 3 },
4046 .errstr_unpriv = "R0 leaks addr",
4047 .result = ACCEPT,
4048 .result_unpriv = REJECT,
4049 },
4050 {
4051 "map element value store of cleared call register",
4052 .insns = {
4053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4055 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4056 BPF_LD_MAP_FD(BPF_REG_1, 0),
4057 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4060 BPF_EXIT_INSN(),
4061 },
4062 .fixup_map2 = { 3 },
4063 .errstr_unpriv = "R1 !read_ok",
4064 .errstr = "R1 !read_ok",
4065 .result = REJECT,
4066 .result_unpriv = REJECT,
4067 },
4068 {
4069 "map element value with unaligned store",
4070 .insns = {
4071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4073 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4074 BPF_LD_MAP_FD(BPF_REG_1, 0),
4075 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4079 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4081 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4083 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4087 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4088 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4089 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4092 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4093 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4094 BPF_EXIT_INSN(),
4095 },
4096 .fixup_map2 = { 3 },
4097 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4098 .result = ACCEPT,
4099 .result_unpriv = REJECT,
4100 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4101 },
4102 {
4103 "map element value with unaligned load",
4104 .insns = {
4105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4107 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4108 BPF_LD_MAP_FD(BPF_REG_1, 0),
4109 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4111 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4112 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4114 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4116 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4117 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4120 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4121 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4122 BPF_EXIT_INSN(),
4123 },
4124 .fixup_map2 = { 3 },
4125 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4126 .result = ACCEPT,
4127 .result_unpriv = REJECT,
4128 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4129 },
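	/* Pointer arithmetic other than 64-bit add/sub on a map value
	 * pointer (AND, 32-bit add, divide, byte swap, or XADD on the
	 * spilled pointer) turns it into an unknown scalar, so the
	 * following dereference must fail with "invalid mem access 'inv'".
	 */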
4130 {
4131 "map element value illegal alu op, 1",
4132 .insns = {
4133 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4135 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4136 BPF_LD_MAP_FD(BPF_REG_1, 0),
4137 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4139 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4140 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4141 BPF_EXIT_INSN(),
4142 },
4143 .fixup_map2 = { 3 },
4144 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4145 .errstr = "invalid mem access 'inv'",
4146 .result = REJECT,
4147 .result_unpriv = REJECT,
4148 },
4149 {
4150 "map element value illegal alu op, 2",
4151 .insns = {
4152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4154 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4155 BPF_LD_MAP_FD(BPF_REG_1, 0),
4156 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4158 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4159 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4160 BPF_EXIT_INSN(),
4161 },
4162 .fixup_map2 = { 3 },
4163 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4164 .errstr = "invalid mem access 'inv'",
4165 .result = REJECT,
4166 .result_unpriv = REJECT,
4167 },
4168 {
4169 "map element value illegal alu op, 3",
4170 .insns = {
4171 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4173 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4174 BPF_LD_MAP_FD(BPF_REG_1, 0),
4175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4177 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4178 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4179 BPF_EXIT_INSN(),
4180 },
4181 .fixup_map2 = { 3 },
4182 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4183 .errstr = "invalid mem access 'inv'",
4184 .result = REJECT,
4185 .result_unpriv = REJECT,
4186 },
4187 {
4188 "map element value illegal alu op, 4",
4189 .insns = {
4190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4192 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4193 BPF_LD_MAP_FD(BPF_REG_1, 0),
4194 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4196 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4197 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4198 BPF_EXIT_INSN(),
4199 },
4200 .fixup_map2 = { 3 },
4201 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4202 .errstr = "invalid mem access 'inv'",
4203 .result = REJECT,
4204 .result_unpriv = REJECT,
4205 },
4206 {
4207 "map element value illegal alu op, 5",
4208 .insns = {
4209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4211 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4212 BPF_LD_MAP_FD(BPF_REG_1, 0),
4213 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4215 BPF_MOV64_IMM(BPF_REG_3, 4096),
4216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4218 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4219 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4220 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4221 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4222 BPF_EXIT_INSN(),
4223 },
4224 .fixup_map2 = { 3 },
4225 .errstr_unpriv = "R0 invalid mem access 'inv'",
4226 .errstr = "R0 invalid mem access 'inv'",
4227 .result = REJECT,
4228 .result_unpriv = REJECT,
4229 },
4230 {
4231 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08004232 .insns = {
4233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4235 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4236 BPF_LD_MAP_FD(BPF_REG_1, 0),
4237 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4238 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
4240 offsetof(struct test_val, foo)),
4241 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4242 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4244 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4245 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4246 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4247 BPF_EXIT_INSN(),
4248 },
4249 .fixup_map2 = { 3 },
4250 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4251 .result = ACCEPT,
4252 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004253 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08004254 },
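	/* Variable-length helper access to the stack: the size register
	 * for bpf_probe_read() must be provably bounded, either by an AND
	 * mask or by conditional jumps. A possible size of zero on a
	 * non-NULL stack pointer, a bound above the 64-byte buffer, or a
	 * missing min/max check must all be rejected.
	 */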
Gianluca Borello06c1c042017-01-09 10:19:49 -08004255 {
4256 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
4257 .insns = {
4258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4260 BPF_MOV64_IMM(BPF_REG_0, 0),
4261 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4262 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4263 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4264 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4265 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4266 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4267 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4268 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4269 BPF_MOV64_IMM(BPF_REG_2, 16),
4270 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4271 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4272 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4273 BPF_MOV64_IMM(BPF_REG_4, 0),
4274 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4275 BPF_MOV64_IMM(BPF_REG_3, 0),
4276 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4277 BPF_MOV64_IMM(BPF_REG_0, 0),
4278 BPF_EXIT_INSN(),
4279 },
4280 .result = ACCEPT,
4281 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4282 },
4283 {
4284 "helper access to variable memory: stack, bitwise AND, zero included",
4285 .insns = {
4286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4288 BPF_MOV64_IMM(BPF_REG_2, 16),
4289 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4290 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4291 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4292 BPF_MOV64_IMM(BPF_REG_3, 0),
4293 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4294 BPF_EXIT_INSN(),
4295 },
4296 .errstr = "invalid stack type R1 off=-64 access_size=0",
4297 .result = REJECT,
4298 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4299 },
4300 {
4301 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4302 .insns = {
4303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4305 BPF_MOV64_IMM(BPF_REG_2, 16),
4306 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4307 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4308 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4309 BPF_MOV64_IMM(BPF_REG_4, 0),
4310 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4311 BPF_MOV64_IMM(BPF_REG_3, 0),
4312 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4313 BPF_MOV64_IMM(BPF_REG_0, 0),
4314 BPF_EXIT_INSN(),
4315 },
4316 .errstr = "invalid stack type R1 off=-64 access_size=65",
4317 .result = REJECT,
4318 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4319 },
4320 {
4321 "helper access to variable memory: stack, JMP, correct bounds",
4322 .insns = {
4323 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4325 BPF_MOV64_IMM(BPF_REG_0, 0),
4326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4329 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4330 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4331 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4333 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4334 BPF_MOV64_IMM(BPF_REG_2, 16),
4335 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4336 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4337 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4338 BPF_MOV64_IMM(BPF_REG_4, 0),
4339 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4340 BPF_MOV64_IMM(BPF_REG_3, 0),
4341 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4342 BPF_MOV64_IMM(BPF_REG_0, 0),
4343 BPF_EXIT_INSN(),
4344 },
4345 .result = ACCEPT,
4346 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4347 },
4348 {
4349 "helper access to variable memory: stack, JMP (signed), correct bounds",
4350 .insns = {
4351 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4353 BPF_MOV64_IMM(BPF_REG_0, 0),
4354 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4355 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4356 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4357 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4359 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4361 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4362 BPF_MOV64_IMM(BPF_REG_2, 16),
4363 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4364 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4365 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4366 BPF_MOV64_IMM(BPF_REG_4, 0),
4367 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4368 BPF_MOV64_IMM(BPF_REG_3, 0),
4369 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4370 BPF_MOV64_IMM(BPF_REG_0, 0),
4371 BPF_EXIT_INSN(),
4372 },
4373 .result = ACCEPT,
4374 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4375 },
4376 {
4377 "helper access to variable memory: stack, JMP, bounds + offset",
4378 .insns = {
4379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4381 BPF_MOV64_IMM(BPF_REG_2, 16),
4382 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4383 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4384 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4385 BPF_MOV64_IMM(BPF_REG_4, 0),
4386 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4388 BPF_MOV64_IMM(BPF_REG_3, 0),
4389 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4390 BPF_MOV64_IMM(BPF_REG_0, 0),
4391 BPF_EXIT_INSN(),
4392 },
4393 .errstr = "invalid stack type R1 off=-64 access_size=65",
4394 .result = REJECT,
4395 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4396 },
4397 {
4398 "helper access to variable memory: stack, JMP, wrong max",
4399 .insns = {
4400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4402 BPF_MOV64_IMM(BPF_REG_2, 16),
4403 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4404 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4405 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4406 BPF_MOV64_IMM(BPF_REG_4, 0),
4407 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4408 BPF_MOV64_IMM(BPF_REG_3, 0),
4409 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4410 BPF_MOV64_IMM(BPF_REG_0, 0),
4411 BPF_EXIT_INSN(),
4412 },
4413 .errstr = "invalid stack type R1 off=-64 access_size=65",
4414 .result = REJECT,
4415 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4416 },
4417 {
4418 "helper access to variable memory: stack, JMP, no max check",
4419 .insns = {
4420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4422 BPF_MOV64_IMM(BPF_REG_2, 16),
4423 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4424 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4425 BPF_MOV64_IMM(BPF_REG_4, 0),
4426 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4427 BPF_MOV64_IMM(BPF_REG_3, 0),
4428 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4429 BPF_MOV64_IMM(BPF_REG_0, 0),
4430 BPF_EXIT_INSN(),
4431 },
4432 .errstr = "R2 unbounded memory access",
4433 .result = REJECT,
4434 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4435 },
4436 {
4437 "helper access to variable memory: stack, JMP, no min check",
4438 .insns = {
4439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4441 BPF_MOV64_IMM(BPF_REG_2, 16),
4442 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4443 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4444 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4445 BPF_MOV64_IMM(BPF_REG_3, 0),
4446 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4447 BPF_MOV64_IMM(BPF_REG_0, 0),
4448 BPF_EXIT_INSN(),
4449 },
4450 .errstr = "invalid stack type R1 off=-64 access_size=0",
4451 .result = REJECT,
4452 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4453 },
4454 {
4455 "helper access to variable memory: stack, JMP (signed), no min check",
4456 .insns = {
4457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4459 BPF_MOV64_IMM(BPF_REG_2, 16),
4460 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4461 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4462 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4463 BPF_MOV64_IMM(BPF_REG_3, 0),
4464 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4465 BPF_MOV64_IMM(BPF_REG_0, 0),
4466 BPF_EXIT_INSN(),
4467 },
4468 .errstr = "R2 min value is negative",
4469 .result = REJECT,
4470 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4471 },
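	/* The same variable-size checks against a map value instead of the
	 * stack: an upper bound of value_size is fine, value_size + 1 is
	 * not, and once the pointer has been advanced by 20 bytes the
	 * bound has to shrink accordingly.
	 */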
4472 {
4473 "helper access to variable memory: map, JMP, correct bounds",
4474 .insns = {
4475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4477 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4478 BPF_LD_MAP_FD(BPF_REG_1, 0),
4479 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4481 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4482 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4483 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4484 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4485 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4486 sizeof(struct test_val), 4),
4487 BPF_MOV64_IMM(BPF_REG_4, 0),
4488 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4489 BPF_MOV64_IMM(BPF_REG_3, 0),
4490 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4491 BPF_MOV64_IMM(BPF_REG_0, 0),
4492 BPF_EXIT_INSN(),
4493 },
4494 .fixup_map2 = { 3 },
4495 .result = ACCEPT,
4496 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4497 },
4498 {
4499 "helper access to variable memory: map, JMP, wrong max",
4500 .insns = {
4501 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4503 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4504 BPF_LD_MAP_FD(BPF_REG_1, 0),
4505 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4506 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4508 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4509 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4510 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4511 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4512 sizeof(struct test_val) + 1, 4),
4513 BPF_MOV64_IMM(BPF_REG_4, 0),
4514 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4515 BPF_MOV64_IMM(BPF_REG_3, 0),
4516 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4517 BPF_MOV64_IMM(BPF_REG_0, 0),
4518 BPF_EXIT_INSN(),
4519 },
4520 .fixup_map2 = { 3 },
4521 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4522 .result = REJECT,
4523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4524 },
4525 {
4526 "helper access to variable memory: map adjusted, JMP, correct bounds",
4527 .insns = {
4528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4530 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4531 BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4534 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4536 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4537 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4538 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4539 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4540 sizeof(struct test_val) - 20, 4),
4541 BPF_MOV64_IMM(BPF_REG_4, 0),
4542 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4543 BPF_MOV64_IMM(BPF_REG_3, 0),
4544 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4545 BPF_MOV64_IMM(BPF_REG_0, 0),
4546 BPF_EXIT_INSN(),
4547 },
4548 .fixup_map2 = { 3 },
4549 .result = ACCEPT,
4550 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4551 },
4552 {
4553 "helper access to variable memory: map adjusted, JMP, wrong max",
4554 .insns = {
4555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4557 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4558 BPF_LD_MAP_FD(BPF_REG_1, 0),
4559 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4563 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4564 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4565 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4566 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4567 sizeof(struct test_val) - 19, 4),
4568 BPF_MOV64_IMM(BPF_REG_4, 0),
4569 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4570 BPF_MOV64_IMM(BPF_REG_3, 0),
4571 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4572 BPF_MOV64_IMM(BPF_REG_0, 0),
4573 BPF_EXIT_INSN(),
4574 },
4575 .fixup_map2 = { 3 },
4576 .errstr = "R1 min value is outside of the array range",
4577 .result = REJECT,
4578 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4579 },
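	/* bpf_csum_diff() memory arguments: a NULL (immediate 0) pointer
	 * with a possibly non-zero size and a valid stack pointer with a
	 * zero size are both rejected. The "8 bytes leak" tests then make
	 * sure bpf_probe_read() with a variable size cannot expose
	 * uninitialized stack bytes, while fully initialized memory is
	 * accepted.
	 */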
4580 {
4581 "helper access to variable memory: size > 0 not allowed on NULL",
4582 .insns = {
4583 BPF_MOV64_IMM(BPF_REG_1, 0),
4584 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004585 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4586 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004587 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4588 BPF_MOV64_IMM(BPF_REG_3, 0),
4589 BPF_MOV64_IMM(BPF_REG_4, 0),
4590 BPF_MOV64_IMM(BPF_REG_5, 0),
4591 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4592 BPF_EXIT_INSN(),
4593 },
4594 .errstr = "R1 type=imm expected=fp",
4595 .result = REJECT,
4596 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4597 },
4598 {
4599 "helper access to variable memory: size = 0 not allowed on != NULL",
4600 .insns = {
4601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4603 BPF_MOV64_IMM(BPF_REG_2, 0),
4604 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4605 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4606 BPF_MOV64_IMM(BPF_REG_3, 0),
4607 BPF_MOV64_IMM(BPF_REG_4, 0),
4608 BPF_MOV64_IMM(BPF_REG_5, 0),
4609 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4610 BPF_EXIT_INSN(),
4611 },
4612 .errstr = "invalid stack type R1 off=-8 access_size=0",
4613 .result = REJECT,
4614 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4615 },
4616 {
4617 "helper access to variable memory: 8 bytes leak",
4618 .insns = {
4619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4621 BPF_MOV64_IMM(BPF_REG_0, 0),
4622 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4623 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4624 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4625 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4626 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4627 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4628 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4629 BPF_MOV64_IMM(BPF_REG_2, 0),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01004630 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4631 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08004632 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4634 BPF_MOV64_IMM(BPF_REG_3, 0),
4635 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4636 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4637 BPF_EXIT_INSN(),
4638 },
4639 .errstr = "invalid indirect read from stack off -64+32 size 64",
4640 .result = REJECT,
4641 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4642 },
4643 {
4644 "helper access to variable memory: 8 bytes no leak (init memory)",
4645 .insns = {
4646 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4647 BPF_MOV64_IMM(BPF_REG_0, 0),
4648 BPF_MOV64_IMM(BPF_REG_0, 0),
4649 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4650 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4651 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4652 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4654 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4655 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4656 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4658 BPF_MOV64_IMM(BPF_REG_2, 0),
4659 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4661 BPF_MOV64_IMM(BPF_REG_3, 0),
4662 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4663 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4664 BPF_EXIT_INSN(),
4665 },
4666 .result = ACCEPT,
4667 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4668 },
Josef Bacik29200c12017-02-03 16:25:23 -05004669 {
4670 "invalid and of negative number",
4671 .insns = {
4672 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4675 BPF_LD_MAP_FD(BPF_REG_1, 0),
4676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4677 BPF_FUNC_map_lookup_elem),
4678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4679 BPF_MOV64_IMM(BPF_REG_1, 6),
4680 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4681 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4682 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4683 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4684 offsetof(struct test_val, foo)),
4685 BPF_EXIT_INSN(),
4686 },
4687 .fixup_map2 = { 3 },
4688 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4689 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4690 .result = REJECT,
4691 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004692 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05004693 },
4694 {
4695 "invalid range check",
4696 .insns = {
4697 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4700 BPF_LD_MAP_FD(BPF_REG_1, 0),
4701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4702 BPF_FUNC_map_lookup_elem),
4703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4704 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4705 BPF_MOV64_IMM(BPF_REG_9, 1),
4706 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4707 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4708 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4709 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4710 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4711 BPF_MOV32_IMM(BPF_REG_3, 1),
4712 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4713 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4714 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4715 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4716 BPF_MOV64_REG(BPF_REG_0, 0),
4717 BPF_EXIT_INSN(),
4718 },
4719 .fixup_map2 = { 3 },
4720 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4721 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4722 .result = REJECT,
4723 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004724 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004725 },
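	/* The next three tests exercise map-in-map lookups
	 * (BPF_MAP_TYPE_ARRAY_OF_MAPS): the inner map pointer returned by
	 * the first lookup must be NULL-checked and must not be modified
	 * before it is reused as R1 for the second lookup.
	 */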
4726 {
4727 "map in map access",
4728 .insns = {
4729 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4730 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4732 BPF_LD_MAP_FD(BPF_REG_1, 0),
4733 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4734 BPF_FUNC_map_lookup_elem),
4735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4736 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4739 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4741 BPF_FUNC_map_lookup_elem),
4742 BPF_MOV64_REG(BPF_REG_0, 0),
4743 BPF_EXIT_INSN(),
4744 },
4745 .fixup_map_in_map = { 3 },
4746 .result = ACCEPT,
4747 },
4748 {
4749 "invalid inner map pointer",
4750 .insns = {
4751 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4752 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4754 BPF_LD_MAP_FD(BPF_REG_1, 0),
4755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4756 BPF_FUNC_map_lookup_elem),
4757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4758 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4764 BPF_FUNC_map_lookup_elem),
4765 BPF_MOV64_REG(BPF_REG_0, 0),
4766 BPF_EXIT_INSN(),
4767 },
4768 .fixup_map_in_map = { 3 },
4769 .errstr = "R1 type=inv expected=map_ptr",
4770 .errstr_unpriv = "R1 pointer arithmetic prohibited",
4771 .result = REJECT,
4772 },
4773 {
4774 "forgot null checking on the inner map pointer",
4775 .insns = {
4776 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4779 BPF_LD_MAP_FD(BPF_REG_1, 0),
4780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4781 BPF_FUNC_map_lookup_elem),
4782 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
4783 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
4785 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4786 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4787 BPF_FUNC_map_lookup_elem),
4788 BPF_MOV64_REG(BPF_REG_0, 0),
4789 BPF_EXIT_INSN(),
4790 },
4791 .fixup_map_in_map = { 3 },
4792 .errstr = "R1 type=map_value_or_null expected=map_ptr",
4793 .result = REJECT,
Josef Bacik29200c12017-02-03 16:25:23 -05004794 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004795};
4796
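/* Each test's insns[] array is zero-padded up to MAX_INSNS; walk
 * backwards from the end to find the last real instruction and return
 * the effective program length.
 */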
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004797static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004798{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004799 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004800
4801 for (len = MAX_INSNS - 1; len > 0; --len)
4802 if (fp[len].code != 0 || fp[len].imm != 0)
4803 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004804 return len + 1;
4805}
4806
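/* Create a BPF_MAP_TYPE_HASH map with an 8-byte key and the given value
 * size; only the value size matters to the verifier tests, so callers
 * keep max_elem tiny.
 */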
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004807static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004808{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004809 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004810
Mickaël Salaünf4874d02017-02-10 00:21:43 +01004811 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004812 size_value, max_elem, BPF_F_NO_PREALLOC);
4813 if (fd < 0)
4814 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004815
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004816 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004817}
4818
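/* Create a 4-slot BPF_MAP_TYPE_PROG_ARRAY for the tail-call
 * (fixup_prog) tests.
 */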
4819static int create_prog_array(void)
4820{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004821 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07004822
Mickaël Salaünf4874d02017-02-10 00:21:43 +01004823 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004824 sizeof(int), 4, 0);
4825 if (fd < 0)
4826 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004827
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004828 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004829}
4830
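/* Build a single-slot BPF_MAP_TYPE_ARRAY_OF_MAPS whose element type is
 * described by a throwaway inner array map; the inner fd is only needed
 * at creation time and is closed before the outer map fd is returned.
 */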
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004831static int create_map_in_map(void)
4832{
4833 int inner_map_fd, outer_map_fd;
4834
4835 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
4836 sizeof(int), 1, 0);
4837 if (inner_map_fd < 0) {
4838 printf("Failed to create array '%s'!\n", strerror(errno));
4839 return inner_map_fd;
4840 }
4841
4842 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
4843 sizeof(int), inner_map_fd, 1, 0);
4844 if (outer_map_fd < 0)
4845 printf("Failed to create array of maps '%s'!\n",
4846 strerror(errno));
4847
4848 close(inner_map_fd);
4849
4850 return outer_map_fd;
4851}
4852
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004853static char bpf_vlog[32768];
4854
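/* Patch the test program before loading: each fixup_* array lists the
 * instruction indices of BPF_LD_MAP_FD placeholders (typically index 3,
 * with imm left at 0); the corresponding map is created on demand and
 * its fd is written into the placeholder's imm field.
 */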
4855static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004856 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07004857{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004858 int *fixup_map1 = test->fixup_map1;
4859 int *fixup_map2 = test->fixup_map2;
4860 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004861 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004862
4863 /* Allocating HTs with 1 elem is fine here, since we only test
4864	 * the verifier and do not perform a runtime lookup, so the only
4865	 * thing that really matters is the value size in this case.
4866 */
4867 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004868 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004869 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004870 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004871 fixup_map1++;
4872 } while (*fixup_map1);
4873 }
4874
4875 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004876 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004877 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004878 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004879 fixup_map2++;
4880 } while (*fixup_map2);
4881 }
4882
4883 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004884 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004885 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004886 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004887 fixup_prog++;
4888 } while (*fixup_prog);
4889 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004890
4891 if (*fixup_map_in_map) {
4892 map_fds[3] = create_map_in_map();
4893 do {
4894 prog[*fixup_map_in_map].imm = map_fds[3];
4895 fixup_map_in_map++;
4896 } while (*fixup_map_in_map);
4897 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004898}
4899
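/* Load one test program and compare the outcome (and, on rejection, the
 * verifier log) against the expected result.  With
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS an "Unknown alignment."
 * rejection of an F_NEEDS_EFFICIENT_UNALIGNED_ACCESS test is a failure;
 * otherwise such a rejection is tolerated and noted.
 */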
4900static void do_test_single(struct bpf_test *test, bool unpriv,
4901 int *passes, int *errors)
4902{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004903 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004904 struct bpf_insn *prog = test->insns;
4905 int prog_len = probe_filter_length(prog);
4906 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004907 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004908 const char *expected_err;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004909 int i;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004910
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004911 for (i = 0; i < MAX_NR_MAPS; i++)
4912 map_fds[i] = -1;
4913
4914 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004915
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +01004916 fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
4917 prog, prog_len, "GPL", 0, bpf_vlog,
4918 sizeof(bpf_vlog));
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004919
4920 expected_ret = unpriv && test->result_unpriv != UNDEF ?
4921 test->result_unpriv : test->result;
4922 expected_err = unpriv && test->errstr_unpriv ?
4923 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004924
4925 reject_from_alignment = fd_prog < 0 &&
4926 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4927 strstr(bpf_vlog, "Unknown alignment.");
4928#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4929 if (reject_from_alignment) {
4930 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4931 strerror(errno));
4932 goto fail_log;
4933 }
4934#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004935 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004936 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004937 printf("FAIL\nFailed to load prog '%s'!\n",
4938 strerror(errno));
4939 goto fail_log;
4940 }
4941 } else {
4942 if (fd_prog >= 0) {
4943 printf("FAIL\nUnexpected success to load!\n");
4944 goto fail_log;
4945 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004946 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004947 printf("FAIL\nUnexpected error message!\n");
4948 goto fail_log;
4949 }
4950 }
4951
4952 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004953 printf("OK%s\n", reject_from_alignment ?
4954 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004955close_fds:
4956 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07004957 for (i = 0; i < MAX_NR_MAPS; i++)
4958 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004959 sched_yield();
4960 return;
4961fail_log:
4962 (*errors)++;
4963 printf("%s", bpf_vlog);
4964 goto close_fds;
4965}
4966
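/* Check whether the effective capability set includes CAP_SYS_ADMIN. */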
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004967static bool is_admin(void)
4968{
4969 cap_t caps;
4970 cap_flag_value_t sysadmin = CAP_CLEAR;
4971 const cap_value_t cap_val = CAP_SYS_ADMIN;
4972
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08004973#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004974 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4975 perror("cap_get_flag");
4976 return false;
4977 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08004978#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +01004979 caps = cap_get_proc();
4980 if (!caps) {
4981 perror("cap_get_proc");
4982 return false;
4983 }
4984 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
4985 perror("cap_get_flag");
4986 if (cap_free(caps))
4987 perror("cap_free");
4988 return (sysadmin == CAP_SET);
4989}
4990
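/* Raise or drop CAP_SYS_ADMIN in the effective set so a root run can
 * emulate an unprivileged loader.
 */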
4991static int set_admin(bool admin)
4992{
4993 cap_t caps;
4994 const cap_value_t cap_val = CAP_SYS_ADMIN;
4995 int ret = -1;
4996
4997 caps = cap_get_proc();
4998 if (!caps) {
4999 perror("cap_get_proc");
5000 return -1;
5001 }
5002 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
5003 admin ? CAP_SET : CAP_CLEAR)) {
5004 perror("cap_set_flag");
5005 goto out;
5006 }
5007 if (cap_set_proc(caps)) {
5008 perror("cap_set_proc");
5009 goto out;
5010 }
5011 ret = 0;
5012out:
5013 if (cap_free(caps))
5014 perror("cap_free");
5015 return ret;
5016}
5017
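/* Run tests [from, to).  Each test is executed unprivileged ("#N/u",
 * only for the default socket filter program type, with CAP_SYS_ADMIN
 * temporarily dropped when running as root) and, when running as root,
 * privileged ("#N/p") as well.
 */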
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005018static int do_test(bool unpriv, unsigned int from, unsigned int to)
5019{
5020 int i, passes = 0, errors = 0;
5021
5022 for (i = from; i < to; i++) {
5023 struct bpf_test *test = &tests[i];
5024
5025		/* Skip the unprivileged run right away for program types
5026		 * that are not supported by non-root.
5027 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005028 if (!test->prog_type) {
5029 if (!unpriv)
5030 set_admin(false);
5031 printf("#%d/u %s ", i, test->descr);
5032 do_test_single(test, true, &passes, &errors);
5033 if (!unpriv)
5034 set_admin(true);
5035 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005036
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005037 if (!unpriv) {
5038 printf("#%d/p %s ", i, test->descr);
5039 do_test_single(test, false, &passes, &errors);
5040 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005041 }
5042
5043 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
5044 return errors ? -errors : 0;
5045}
5046
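/* Optional command-line arguments select which tests run:
 *   no argument   - run every test
 *   one argument  - run only that test index
 *   two arguments - run the inclusive range [from, to]
 */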
5047int main(int argc, char **argv)
5048{
5049 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
5050 struct rlimit rlim = { 1 << 20, 1 << 20 };
5051 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +01005052 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005053
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005054 if (argc == 3) {
5055 unsigned int l = atoi(argv[argc - 2]);
5056 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005057
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005058 if (l < to && u < to) {
5059 from = l;
5060 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005061 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005062 } else if (argc == 2) {
5063 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005064
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005065 if (t < to) {
5066 from = t;
5067 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07005068 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005069 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005070
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02005071 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
5072 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07005073}