/*
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <sys/capability.h>
#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#define MAX_INSNS 512
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
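/* Flags for struct bpf_test below: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks
 * tests that are only meaningful on targets with efficient unaligned access
 * (see the CONFIG_ fallback above), and F_LOAD_WITH_STRICT_ALIGNMENT makes
 * the harness load the program with strict alignment checking requested.
 * The harness code that consumes these flags sits outside this excerpt.
 */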
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
int fixup_map1[MAX_FIXUPS];
int fixup_map2[MAX_FIXUPS];
int fixup_prog[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
enum {
UNDEF,
ACCEPT,
REJECT
} result, result_unpriv;
enum bpf_prog_type prog_type;
uint8_t flags;
};
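/* Each entry below is one verifier test case: .insns is the program,
 * .result/.errstr give the expected privileged outcome and
 * .result_unpriv/.errstr_unpriv the expected unprivileged one (falling back
 * to the privileged expectation when unset). The fixup_* arrays list
 * instruction indices whose map-fd immediates the harness (not shown in
 * this excerpt) patches with freshly created maps before loading; entries
 * without a .prog_type are loaded as socket filters.
 */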
/* Note we want this to be 64-bit aligned so that the end of our array is
* actually the end of the structure.
*/
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
static struct bpf_test tests[] = {
{
"add+sub+mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_2, 3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
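	/* Worked through, the program above computes: R1 = 1 + 2 = 3, then
	 * R1 = 3 - R2 = 3 - 3 = 0, R1 = 0 + (-1) = -1, R1 = -1 * 3 = -3, so
	 * it exits with R0 = -3; the test only checks that the verifier
	 * accepts the program, not the runtime value.
	 */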
{
"unreachable",
.insns = {
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"unreachable2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"out of range jump",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"out of range jump2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -2),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"test1 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test2 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test3 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test4 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test5 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test6 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"test7 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"test8 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "uses reserved fields",
.result = REJECT,
},
{
"test9 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 1, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test10 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test11 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test12 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "not pointing to valid bpf_map",
.result = REJECT,
},
{
"test13 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
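	/* The ld_imm64 tests above all probe the same encoding rule:
	 * BPF_LD_IMM64 is a two-slot pseudo instruction whose second slot
	 * must have a zero opcode and zero registers (it only carries the
	 * upper 32 immediate bits). Jumping into the middle of the pair,
	 * truncating it, or setting reserved fields in either half must be
	 * rejected.
	 */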
{
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"loop (back-edge)",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"loop2 (back-edge)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
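	/* All three loop tests rely on the same rule: this verifier runs a
	 * depth-first control-flow check before simulation and rejects any
	 * cycle outright, so even the conditional jump backwards in the last
	 * test is refused as "back-edge" without asking whether the loop
	 * could terminate.
	 */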
{
"read uninitialized register",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"read invalid register",
.insns = {
BPF_MOV64_REG(BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"program doesn't init R0 before exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"program doesn't init R0 before exit in all branches",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"stack out of bounds",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack",
.result = REJECT,
},
{
"invalid call insn1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid call insn2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid function call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
"uninitialized stack1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 2 },
.errstr = "invalid indirect read from stack",
.result = REJECT,
},
{
"uninitialized stack2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.errstr = "invalid read from stack",
.result = REJECT,
},
{
"invalid fp arithmetic",
/* If this ever gets changed, make sure JITs can deal with it. */
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer arithmetic",
.result_unpriv = REJECT,
.errstr = "R1 invalid mem access",
.result = REJECT,
},
{
"non-invalid fp arithmetic",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"non-invalid argument register",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* fill it back into R2 */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
/* should be able to access R0 = *(R2 + 8) */
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
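	/* Illustrative sketch, not part of the original suite: the same
	 * spill/fill pattern with a scalar instead of a pointer should pass
	 * even unprivileged, since no address can leak.
	 */
	{
		"check valid spill/fill, scalar (illustrative sketch)",
		.insns = {
			/* spill an immediate scalar into the stack */
			BPF_MOV64_IMM(BPF_REG_1, 42),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R0 and exit */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},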
{
"check valid spill/fill, skb mark",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = ACCEPT,
},
{
"check corrupted spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* mess with the spilled R1 pointer on stack */
BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
/* fill back into R0 should fail */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.errstr = "corrupted spill",
.result = REJECT,
},
{
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"invalid dst register in STX",
.insns = {
BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid dst register in ST",
.insns = {
BPF_ST_MEM(BPF_B, 14, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid src register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
BPF_EXIT_INSN(),
},
.errstr = "R12 is invalid",
.result = REJECT,
},
{
"invalid dst register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R11 is invalid",
.result = REJECT,
},
{
"junk insn",
.insns = {
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM",
.result = REJECT,
},
{
"junk insn2",
.insns = {
BPF_RAW_INSN(1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_LDX uses reserved fields",
.result = REJECT,
},
{
"junk insn3",
.insns = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn4",
.insns = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn5",
.insns = {
BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",
.result = REJECT,
},
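	/* The "junk insn" errors follow from the opcode layout: the low 3
	 * bits of insn->code select the class and the high 4 bits the
	 * operation. An all-ones code byte (-1, i.e. 0xff) decodes to class
	 * 0x7 (BPF_ALU64) with operation 0xf0, hence "invalid BPF_ALU opcode
	 * f0"; code 0x7f is a valid ALU64 right shift (BPF_RSH) with a
	 * register source, so it instead trips "BPF_ALU uses reserved
	 * fields" on its nonzero immediate.
	 */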
{
"misaligned read from stack",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"invalid map_fd for function call",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.errstr = "fd 0 is not pointing to valid bpf_map",
.result = REJECT,
},
{
"don't check return value before access",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access 'map_value_or_null'",
.result = REJECT,
},
{
"access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "misaligned access",
.result = REJECT,
},
{
"sometimes access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access",
.errstr_unpriv = "R0 leaks addr",
.result = REJECT,
},
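	/* The three lookup tests above exercise the map_value_or_null state:
	 * bpf_map_lookup_elem() returns either NULL or a pointer to the
	 * value, so R0 must be NULL-checked before any access, and once
	 * checked the access still has to stay aligned and within the bounds
	 * of the value.
	 */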
{
"jump test 1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 14),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 5),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 3",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 19),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JA, 0, 0, 15),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
BPF_JMP_IMM(BPF_JA, 0, 0, 7),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 24 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 4",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 5",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"access skb fields ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, queue_mapping)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, protocol)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_present)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, napi_id)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"access skb fields bad1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"access skb fields bad2",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad3",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -12),
},
.fixup_map1 = { 6 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad4",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -13),
},
.fixup_map1 = { 7 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"check skb->mark is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check skb->tc_index is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check cb access: byte",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
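	/* In this era skb->cb[] is the only part of __sk_buff that socket
	 * filters may write; the test above stores and loads every byte of
	 * the 20-byte area, while the following tests walk the same offsets
	 * with half, word and double-word accesses and check that misaligned
	 * or out-of-bounds variants are refused.
	 */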
{
"__sk_buff->hash, offset 0, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"__sk_buff->tc_index, offset 3, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 3),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash byte load not permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load not permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load not permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
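	/* The #if __BYTE_ORDER blocks above encode the same rule for both
	 * endiannesses: in this kernel only a narrow load covering the least
	 * significant byte of skb->hash is allowed, and the offset of that
	 * byte within the 4-byte field depends on host byte order. The
	 * half-word variants further down apply the identical logic with
	 * 2-byte loads.
	 */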
{
"check cb access: byte, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: half",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: half, unaligned",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check __sk_buff->hash, offset 0, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->tc_index, offset 2, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: word",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: word, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: double, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check out of range skb->cb access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 256),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
{
"write skb fields from socket prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
},
{
"write skb fields from tc_cls_act prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "",
.result_unpriv = REJECT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"PTR_TO_STACK store/load",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"PTR_TO_STACK store/load - bad alignment on off",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned access off -6 size 8",
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned access off -2 size 8",
},
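	/* The offsets in the two errors above come from fp-relative
	 * arithmetic: fp-10 plus insn offset 2 is fp-8, fine for an 8-byte
	 * access, while fp-8 plus 2 is fp-6 ("off -6 size 8") and fp-10 plus
	 * 8 is fp-2 ("off -2 size 8"), both misaligned for size 8.
	 */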
{
"PTR_TO_STACK store/load - out of bounds low",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=-79992 size=8",
},
{
"PTR_TO_STACK store/load - out of bounds high",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=0 size=8",
},
{
"unpriv: return pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr",
},
{
"unpriv: add const to pointer",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: add pointer to pointer",
.insns = {
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: neg pointer",
.insns = {
BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: cmp pointer with const",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer comparison",
},
{
"unpriv: cmp pointer with pointer",
.insns = {
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R10 pointer comparison",
},
{
"unpriv: check that printk is disallowed",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_trace_printk),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "unknown func bpf_trace_printk#6",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to helper function",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R4 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: indirectly pass pointer on stack to helper function",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
},
{
"unpriv: mangle pointer on stack 1",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: mangle pointer on stack 2",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: read pointer from stack in small chunks",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid size",
.result = REJECT,
},
{
"unpriv: write pointer into ctx",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"unpriv: spill/fill of ctx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unpriv: spill/fill of ctx 2",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 3",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=fp expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 4",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
BPF_REG_0, -8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
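	/* A ctx pointer may be spilled and filled as a whole (the first two
	 * ctx tests), but any instruction that can modify the slot, the
	 * frame-pointer overwrite in ctx 3 or the XADD in ctx 4, changes
	 * what the fill produces, so the later helper call sees fp or inv
	 * instead of the required ctx type.
	 */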
{
"unpriv: spill/fill of different pointers stx",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 42),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of different pointers ldx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
-(__s32)offsetof(struct bpf_perf_event_data,
sample_period) - 8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
offsetof(struct bpf_perf_event_data,
sample_period)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"unpriv: write pointer into map elem value",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: partial copy of pointer",
.insns = {
BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 partial copy",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to tail_call",
.insns = {
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.errstr_unpriv = "R3 leaks addr into helper",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: cmp map pointer with zero",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: write into frame pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
{
"unpriv: spill/fill frame pointer",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
{
"unpriv: cmp of frame pointer",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: adding of fp",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "pointer arithmetic prohibited",
.result_unpriv = REJECT,
.errstr = "R1 invalid mem access",
.result = REJECT,
},
{
"unpriv: cmp of stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R2 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"stack pointer arithmetic",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
BPF_ST_MEM(0, BPF_REG_2, 4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_ST_MEM(0, BPF_REG_2, 4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"raw_stack: no skb_load_bytes",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
/* Call to skb_load_bytes() omitted. */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -8+0 size 8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, ~0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, zero len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, no init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
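	/* bpf_skb_load_bytes() takes a raw stack buffer as its destination:
	 * the helper is declared as writing the whole region, so the
	 * verifier accepts a completely uninitialized buffer ("no init")
	 * just as it accepts a pre-initialized one, provided the region
	 * itself is a valid, in-bounds stack area.
	 */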
{
"raw_stack: skb_load_bytes, spilled regs around bounds",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
offsetof(struct __sk_buff, pkt_type)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs + data",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-513 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=-1",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-512 access_size=0",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, large access",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 512),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
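/* Direct packet access: the verifier permits loads/stores through a
* packet pointer only on paths where the access has been bounds-checked
* against data_end. The tests below cover reads, writes, arithmetic on
* the packet pointer, and both orderings of the range comparison.
*/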
{
"direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access off=76",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
"direct packet access: test4 (write)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test5 (pkt_end >= reg, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test6 (pkt_end >= reg, bad access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test7 (pkt_end >= reg, both accesses)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test8 (double test, variant 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test10 (write invalid)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test11 (shift, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_3, 144),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test12 (and, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_3, 144),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test13 (branches, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
BPF_MOV64_IMM(BPF_REG_3, 14),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_3, 24),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
BPF_MOV64_IMM(BPF_REG_5, 12),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test15 (spill with xadd)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_5, 4096),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 invalid mem access 'inv'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test16 (arith on data_end)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test17 (pruning, alignment)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_A(-6),
},
.errstr = "misaligned packet access off 2+15+-4 size 4",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"direct packet access: test18 (imm += pkt_ptr, 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 8),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test19 (imm += pkt_ptr, 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_4, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test20 (x += pkt_ptr, 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test21 (x += pkt_ptr, 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test22 (x += pkt_ptr, 3)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test23 (x += pkt_ptr, 4)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet",
},
{
"direct packet access: test24 (x += pkt_ptr, 5)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 64),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
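/* Helper access to packet: packet pointers may be passed to helpers as
* memory arguments (here as map keys/values) only when the full argument
* size has already been range-checked against data_end; exercised below
* for both XDP and cls_act program types.
*/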
{
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.result_unpriv = ACCEPT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test2, unchecked packet_ptr",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test3, variable add",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test4, packet_ptr with bad range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test5, packet_ptr with too short range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test6, cls valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test7, cls unchecked packet_ptr",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test8, cls variable add",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test9, cls packet_ptr with bad range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test10, cls packet_ptr with too short range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test11, cls unsuitable helper 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_4, 42),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_store_bytes),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "helper access to the packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test12, cls unsuitable helper 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_4, 4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "helper access to the packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test13, cls helper ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test14, cls helper fail sub",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "type=inv expected=fp",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test15, cls helper fail range 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test16, cls helper fail range 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, -9),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test17, cls helper fail range 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, ~0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test18, cls helper fail range zero",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test19, pkt end as input",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=pkt_end expected=fp",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test20, wrong reg",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
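/* Map value access: a map_lookup_elem result may be offset by constants
* or registers, but every access must be provably inside value_size
* (here sizeof(struct test_val) == 48). Unprivileged programs may not
* perform pointer arithmetic on map values at all.
*/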
{
"valid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"valid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a signed variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), /* imm truncates to -1: skip if R1 s> -1, i.e. R1 >= 0 */
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=48 size=8",
.result = REJECT,
},
{
"invalid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is outside of the array range",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with no floor check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3, 11 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
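/* ptr_to_map_value_or_null: the lookup result must be NULL-checked
* before it is dereferenced. The check is tracked across plain register
* copies, but any ALU op on the still-unchecked pointer downgrades it
* to an unknown value.
*/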
{
"multiple registers share map_lookup_elem result",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"alu ops on ptr_to_map_value_or_null, 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"alu ops on ptr_to_map_value_or_null, 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"alu ops on ptr_to_map_value_or_null, 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"invalid memory access with multiple map_lookup_elem calls",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = REJECT,
.errstr = "R4 !read_ok",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"valid indirect map_lookup_elem access with 2nd lookup in branch",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_2, 10),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"multiple registers share map_lookup_elem bad reg type",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
BPF_MOV64_IMM(BPF_REG_1, 3),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"invalid map access from else condition",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
.result = REJECT,
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
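/* Constant tracking through ALU ops: ORing known constants must yield
* a known constant, so the stack range passed to probe_read can still
* be validated (fp-48 with size 34|13 == 47 fits the frame, while
* 34|24 == 58 overruns it).
*/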
{
"constant register |= constant should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 13),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 24),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
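/* LWT program types: lightweight tunnel programs may read packet data
* directly, but only LWT_XMIT may write to it.
*/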
{
"invalid direct packet write for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "cannot write into packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"invalid direct packet write for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "cannot write into packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_OUT,
},
{
"direct packet write for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
"direct packet read for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"direct packet read for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_OUT,
},
{
"direct packet read for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
"overlapping checks for direct packet access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
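/* skb->tc_classid is only meaningful for tc programs, so the
* LWT-flavoured tests below expect the context access to be rejected.
* Note they omit .prog_type and are therefore loaded with the harness's
* default program type, where the field is unreadable as well.
*/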
{
"invalid access of tc_classid for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
{
"invalid access of tc_classid for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
{
"invalid access of tc_classid for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
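/* Pointer leaks: unprivileged programs must not be able to write kernel
* pointers (map addresses, the stack pointer) into the context or into
* map values, whether via plain stores or xadd.
*/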
{
"leak pointer into ctx 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 2 },
.errstr_unpriv = "R2 leaks addr into mem",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"leak pointer into ctx 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 leaks addr into mem",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"leak pointer into ctx 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.errstr_unpriv = "R2 leaks addr into ctx",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"leak pointer into map val",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr_unpriv = "R6 leaks addr into mem",
.result_unpriv = REJECT,
.result = ACCEPT,
},
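/* Helper access to map values: when a map value pointer is used as a
* helper memory argument, the (pointer, size) pair is validated against
* value_size; empty, negative, and out-of-bound ranges must be
* rejected.
*/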
{
"helper access to map: full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=56",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: negative range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
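/* Adjusted map value, constant immediate offset: after adding a known
* constant to the value pointer, helper range checks are performed
* relative to the adjusted position within the value.
*/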
{
"helper access to adjusted map (via const imm): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): negative range (> adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): negative range (< adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
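/* "via const reg": same adjustment, but the constant is moved into a
 * register and added with BPF_ALU64_REG, so the verifier must track
 * the known constant through the register.
 */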
{
"helper access to adjusted map (via const reg): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): negative range (> adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): negative range (< adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
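/* "via variable": the adjustment is loaded from the map value itself
 * and is only bounded by the preceding conditional jump; a missing or
 * wrong max check must be rejected.
 */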
{
"helper access to adjusted map (via variable): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): no max check",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): wrong max check",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=45",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
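/* The tests below check that a map value pointer keeps its bounds
 * across a stack spill/fill, that unaligned accesses depend on
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, and that ALU ops which scramble
 * a pointer (AND, 32-bit ADD, DIV, byte swap, XADD on the spill slot)
 * degrade it to an unknown value ('inv') that cannot be dereferenced.
 */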
{
"map element value is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
{
"map element value or null is marked on register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
{
"map element value store of cleared call register",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R1 !read_ok",
.errstr = "R1 !read_ok",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value with unaligned store",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map element value with unaligned load",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map element value illegal alu op, 1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 3",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 4",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 5",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_IMM(BPF_REG_3, 4096),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
offsetof(struct test_val, foo)),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
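/* "helper access to variable memory": the size in R2 is not constant
 * but is bounded either by masking (BPF_AND) or by conditional jumps;
 * the verifier must derive correct min/max bounds for it before the
 * helper call is allowed.
 */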
{
"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP (signed), correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, bounds + offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, no max check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 unbounded memory access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP (signed), no min check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val), 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) + 1, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=49",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map adjusted, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 20, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map adjusted, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 19, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size > 0 not allowed on NULL",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=imm expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size = 0 not allowed on != NULL",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-8 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: 8 bytes leak",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
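/* fp-32 is deliberately left uninitialized */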
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack off -64+32 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: 8 bytes no leak (init memory)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
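/* Masking with a negative constant (AND -4) or going through 32-bit
 * ALU ops must not convince the verifier that a value-derived pointer
 * offset has a non-negative minimum.
 */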
{
"invalid and of negative number",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 6),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid range check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
BPF_MOV32_IMM(BPF_REG_3, 1),
BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
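/* Map-in-map: the inner map pointer returned by the first lookup may
 * feed a second lookup, but only after a NULL check and with no
 * arithmetic applied to it.
 */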
{
"map in map access",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
.result = ACCEPT,
},
{
"invalid inner map pointer",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
.errstr = "R1 type=inv expected=map_ptr",
.errstr_unpriv = "R1 pointer arithmetic prohibited",
.result = REJECT,
},
{
"forgot null checking on the inner map pointer",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
.errstr = "R1 type=map_value_or_null expected=map_ptr",
.result = REJECT,
},
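/* BPF_LD_ABS/BPF_LD_IND expect the ctx in R6 and clobber the caller-
 * saved registers R1-R5, so reading any of those afterwards must fail
 * with !read_ok; callee-saved registers such as R7 survive.
 */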
{
"ld_abs: check calling conv, r1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r2",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r3",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.errstr = "R3 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r4",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_EXIT_INSN(),
},
.errstr = "R4 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r5",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.errstr = "R5 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r7",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_7, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"ld_ind: check calling conv, r1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
},
{
"ld_ind: check calling conv, r2",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"ld_ind: check calling conv, r3",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.errstr = "R3 !read_ok",
.result = REJECT,
},
{
"ld_ind: check calling conv, r4",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_EXIT_INSN(),
},
.errstr = "R4 !read_ok",
.result = REJECT,
},
{
"ld_ind: check calling conv, r5",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_5, 1),
BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.errstr = "R5 !read_ok",
.result = REJECT,
},
{
"ld_ind: check calling conv, r7",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_7, 1),
BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
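/* Narrow context loads: the 64-bit sample_period field may be read at
 * byte/half/word/dword granularity (with the offset adjusted on
 * big-endian hosts), whereas narrow loads of the __sk_buff fields
 * below are invalid bpf_context accesses.
 */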
{
"check bpf_perf_event_data->sample_period byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period word load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period dword load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check skb->data half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
{
"check skb->tc_classid half load not permitted for lwt prog",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
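/* "bounds checks mixing signed and unsigned": each variant combines
 * unsigned (JGT/JGE) and signed (JSGT/JSGE) guards in a way that
 * fails to establish a non-negative minimum for the value; using it
 * in map pointer arithmetic must still be rejected.
 */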
{
"bounds checks mixing signed and unsigned, positive bounds",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 2),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R8 invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R8 invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 4",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 5",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 invalid mem access",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_6, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R4 min value is negative, either use unsigned",
.errstr = "R4 min value is negative, either use unsigned",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 7",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 8",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 9",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 10",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 11",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 12",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
/* Dead branch. */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 13",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -6),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 14",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, 2),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_7, 1),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 15",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_8, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"bounds checks mixing signed and unsigned, variant 16",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_MOV64_IMM(BPF_REG_2, -6),
BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"subtraction bounds (map value)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
},
};
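/* Determine the actual program length by scanning backwards for
 * the last instruction with a non-zero opcode or immediate.
 */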
static int probe_filter_length(const struct bpf_insn *fp)
{
int len;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].imm != 0)
break;
return len + 1;
}
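/* Create a hash map with a fixed 8 byte key; only the value size
 * differs between callers.
 */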
static int create_map(uint32_t size_value, uint32_t max_elem)
{
int fd;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
size_value, max_elem, BPF_F_NO_PREALLOC);
if (fd < 0)
printf("Failed to create hash map '%s'!\n", strerror(errno));
return fd;
}
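/* Create a prog array for the tests that patch in a prog array fd
 * via fixup_prog.
 */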
static int create_prog_array(void)
{
int fd;
fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
if (fd < 0)
printf("Failed to create prog array '%s'!\n", strerror(errno));
return fd;
}
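/* Set up a single slot array of maps; the inner map fd is only
 * needed to create the outer map and can be closed afterwards.
 */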
static int create_map_in_map(void)
{
int inner_map_fd, outer_map_fd;
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(int), 1, 0);
if (inner_map_fd < 0) {
printf("Failed to create array '%s'!\n", strerror(errno));
return inner_map_fd;
}
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
sizeof(int), inner_map_fd, 1, 0);
if (outer_map_fd < 0)
printf("Failed to create array of maps '%s'!\n",
strerror(errno));
close(inner_map_fd);
return outer_map_fd;
}
static char bpf_vlog[32768];
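/* Patch the freshly created map fds into the imm field of the
 * BPF_LD_MAP_FD placeholder instructions listed in the
 * zero-terminated fixup arrays.
 */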
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
int *map_fds)
{
int *fixup_map1 = test->fixup_map1;
int *fixup_map2 = test->fixup_map2;
int *fixup_prog = test->fixup_prog;
int *fixup_map_in_map = test->fixup_map_in_map;
/* Allocating hash maps with a single element is fine here, since
 * we only exercise the verifier and never do a runtime lookup, so
 * the only thing that really matters is the value size.
 */
if (*fixup_map1) {
map_fds[0] = create_map(sizeof(long long), 1);
do {
prog[*fixup_map1].imm = map_fds[0];
fixup_map1++;
} while (*fixup_map1);
}
if (*fixup_map2) {
map_fds[1] = create_map(sizeof(struct test_val), 1);
do {
prog[*fixup_map2].imm = map_fds[1];
fixup_map2++;
} while (*fixup_map2);
}
if (*fixup_prog) {
map_fds[2] = create_prog_array();
do {
prog[*fixup_prog].imm = map_fds[2];
fixup_prog++;
} while (*fixup_prog);
}
if (*fixup_map_in_map) {
map_fds[3] = create_map_in_map();
do {
prog[*fixup_map_in_map].imm = map_fds[3];
fixup_map_in_map++;
} while (*fixup_map_in_map);
}
}
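/* Load a single test program and compare the verifier verdict, and
 * on rejection the error message, against the expected outcome for
 * the current privilege level.
 */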
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
int fd_prog, expected_ret, reject_from_alignment;
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
int prog_type = test->prog_type;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
int i;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
do_test_fixup(test, prog, map_fds);
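/* Tests without an explicit prog_type default to socket filters,
 * which can also be loaded unprivileged.
 */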
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
reject_from_alignment = fd_prog < 0 &&
(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
strstr(bpf_vlog, "Unknown alignment.");
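/* Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may legitimately
 * be rejected for alignment reasons on architectures without
 * efficient unaligned access; such a rejection is tolerated below,
 * but counts as a failure where unaligned access is efficient.
 */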
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (reject_from_alignment) {
printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
strerror(errno));
goto fail_log;
}
#endif
if (expected_ret == ACCEPT) {
if (fd_prog < 0 && !reject_from_alignment) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(errno));
goto fail_log;
}
} else {
if (fd_prog >= 0) {
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
printf("FAIL\nUnexpected error message!\n");
goto fail_log;
}
}
(*passes)++;
printf("OK%s\n", reject_from_alignment ?
" (NOTE: reject due to unknown alignment)" : "");
close_fds:
close(fd_prog);
for (i = 0; i < MAX_NR_MAPS; i++)
close(map_fds[i]);
sched_yield();
return;
fail_log:
(*errors)++;
printf("%s", bpf_vlog);
goto close_fds;
}
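/* Check whether CAP_SYS_ADMIN is in the effective capability set. */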
static bool is_admin(void)
{
cap_t caps;
cap_flag_value_t sysadmin = CAP_CLEAR;
const cap_value_t cap_val = CAP_SYS_ADMIN;
#ifdef CAP_IS_SUPPORTED
if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
perror("cap_get_flag");
return false;
}
#endif
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return false;
}
if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
perror("cap_get_flag");
if (cap_free(caps))
perror("cap_free");
return (sysadmin == CAP_SET);
}
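/* Raise or drop CAP_SYS_ADMIN in the effective capability set so
 * privileged and unprivileged runs can alternate within the same
 * process.
 */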
static int set_admin(bool admin)
{
cap_t caps;
const cap_value_t cap_val = CAP_SYS_ADMIN;
int ret = -1;
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return -1;
}
if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
admin ? CAP_SET : CAP_CLEAR)) {
perror("cap_set_flag");
goto out;
}
if (cap_set_proc(caps)) {
perror("cap_set_proc");
goto out;
}
ret = 0;
out:
if (cap_free(caps))
perror("cap_free");
return ret;
}
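/* Run tests in the range [from, to); tests without an explicit
 * prog_type get an unprivileged pass ("/u"), and a privileged
 * pass ("/p") is added when running as root.
 */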
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
int i, passes = 0, errors = 0;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
/* Program types that are not supported by non-root are
 * skipped right away.
 */
if (!test->prog_type) {
if (!unpriv)
set_admin(false);
printf("#%d/u %s ", i, test->descr);
do_test_single(test, true, &passes, &errors);
if (!unpriv)
set_admin(true);
}
if (!unpriv) {
printf("#%d/p %s ", i, test->descr);
do_test_single(test, false, &passes, &errors);
}
}
printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
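/* Usage: test_verifier [from [to]]
 *
 * Without arguments, all tests run. One argument selects a single
 * test, two arguments select an inclusive range:
 *
 *   ./test_verifier         run every test
 *   ./test_verifier 7       run only test #7
 *   ./test_verifier 7 12    run tests #7 through #12
 *
 * Unprivileged runs are capped to a small RLIMIT_MEMLOCK, while
 * root runs get an unlimited one.
 */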
int main(int argc, char **argv)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
struct rlimit rlim = { 1 << 20, 1 << 20 };
unsigned int from = 0, to = ARRAY_SIZE(tests);
bool unpriv = !is_admin();
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
unsigned int u = atoi(argv[argc - 1]);
if (l < to && u < to) {
from = l;
to = u + 1;
}
} else if (argc == 2) {
unsigned int t = atoi(argv[argc - 1]);
if (t < to) {
from = t;
to = t + 1;
}
}
setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
return do_test(unpriv, from, to);
}