/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1    BPF_REG_1
#define BPF_REG_ARG2    BPF_REG_2
#define BPF_REG_ARG3    BPF_REG_3
#define BPF_REG_ARG4    BPF_REG_4
#define BPF_REG_ARG5    BPF_REG_5
#define BPF_REG_CTX     BPF_REG_6
#define BPF_REG_FP      BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A       BPF_REG_0
#define BPF_REG_X       BPF_REG_7
#define BPF_REG_TMP     BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK   512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

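/* Example (illustrative, added for clarity): these initializers are meant
 * for building struct bpf_insn arrays, e.g.
 *
 *      BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3)    encodes r2 += r3
 *      BPF_ALU32_IMM(BPF_AND, BPF_REG_1, 0xff)         encodes r1 &= 0xff
 */
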
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = LEN })

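/* Example (illustrative): BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 16) converts
 * the low 16 bits of r0 from big endian to host order, like be16_to_cpu();
 * TYPE selects the direction via BPF_SRC(), LEN is one of 16, 32 or 64.
 */
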
/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

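/* Note (added for clarity): as with the other 32-bit ALU forms, the
 * BPF_MOV32_* variants write the low 32 bits of DST and the result is
 * zero-extended into the upper half of the 64-bit register.
 */
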
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)                                  \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_DW | BPF_IMM,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = (__u32) (IMM) }),                      \
        ((struct bpf_insn) {                                    \
                .code  = 0, /* zero is reserved opcode */       \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })

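/* Note (added for clarity): a 64-bit immediate cannot fit into a single
 * 8-byte bpf_insn, so BPF_LD_IMM64_RAW expands to two consecutive array
 * entries; the second slot uses the reserved opcode 0 and carries the
 * upper 32 bits. Example (illustrative):
 *
 *      BPF_LD_IMM64(BPF_REG_1, 0x123456789abcdefULL)   r1 = 0x123456789abcdef
 */
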
#define BPF_PSEUDO_MAP_FD       1

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)                              \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

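/* Example (illustrative, 'map_fd' is a hypothetical file descriptor
 * obtained from the bpf() syscall): BPF_LD_MAP_FD(BPF_REG_1, map_fd) loads
 * a map reference into r1; since src_reg equals BPF_PSEUDO_MAP_FD, the
 * verifier knows to rewrite the process-local fd into the in-kernel map
 * pointer.
 */
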
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                                   \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
                .dst_reg = 0,                                   \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

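/* Example (illustrative): BPF_LD_ABS(BPF_H, 12) fetches the 16-bit
 * EtherType at packet offset 12 into r0, converted to host order, matching
 * classic BPF load semantics; these instructions implicitly expect the skb
 * context in r6 (BPF_REG_CTX).
 */
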
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

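/* Example (illustrative): spilling a register to the BPF stack and filling
 * it back, using negative offsets from the frame pointer r10:
 *
 *      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -8)  *(u64 *)(fp - 8) = r0
 *      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, -8)  r1 = *(u64 *)(fp - 8)
 */
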
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

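/* Example (illustrative): BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2) skips the
 * next two instructions when r0 == 0; off16 is relative to the instruction
 * following the jump.
 */
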
/* Function call */

#define BPF_EMIT_CALL(FUNC)                                     \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((FUNC) - __bpf_call_base) })

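/* Note (added for clarity): the call target is encoded as a 32-bit delta
 * from __bpf_call_base (declared below); the interpreter reconstructs the
 * function pointer as __bpf_call_base + insn->imm and passes up to five
 * arguments in r1-r5, per the BPF_REG_ARG* mapping above.
 */
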
/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                  \
        ((struct bpf_insn) {                                    \
                .code  = CODE,                                  \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()                                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_EXIT,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = 0 })

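/* Putting it together (illustrative, not part of the original header), a
 * minimal program that unconditionally returns 0:
 *
 *      struct bpf_insn prog[] = {
 *              BPF_MOV64_IMM(BPF_REG_0, 0),    // r0 = 0
 *              BPF_EXIT_INSN(),                // return r0
 *      };
 */
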
#define bytes_to_bpf_size(bytes)                                \
({                                                              \
        int bpf_size = -EINVAL;                                 \
                                                                \
        if (bytes == sizeof(u8))                                \
                bpf_size = BPF_B;                               \
        else if (bytes == sizeof(u16))                          \
                bpf_size = BPF_H;                               \
        else if (bytes == sizeof(u32))                          \
                bpf_size = BPF_W;                               \
        else if (bytes == sizeof(u64))                          \
                bpf_size = BPF_DW;                              \
                                                                \
        bpf_size;                                               \
})

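/* Example (illustrative): bytes_to_bpf_size(sizeof(u32)) evaluates to
 * BPF_W; any size other than 1, 2, 4 or 8 bytes yields -EINVAL.
 */
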
/* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) \
        (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
        u16             len;
        compat_uptr_t   filter;         /* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
        u16                     len;
        struct sock_filter      *filter;
};

struct bpf_binary_header {
        unsigned int pages;
        u8 image[];
};

struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        bool                    jited;          /* Is our filter JIT'ed? */
        u32                     len;            /* Number of filter blocks */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        /* Instructions for interpreter */
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
        };
};

struct sk_filter {
        atomic_t        refcnt;
        struct rcu_head rcu;
        struct bpf_prog *prog;
};

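/* Note (added for clarity): struct sk_filter is the per-socket wrapper; it
 * refcounts and RCU-protects the underlying bpf_prog, which may be shared
 * and holds either classic insns or internal insnsi, with bpf_func pointing
 * at the interpreter or the JITed image.
 */
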
#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
        return max(sizeof(struct bpf_prog),
                   offsetof(struct bpf_prog, insns[proglen]));
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
        set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
        set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

void bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

int bpf_convert_filter(struct sock_filter *prog, int len,
                       struct bpf_insn *new_prog, int *new_len);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_ro(fp);
        __bpf_prog_free(fp);
}

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_detach_filter(struct sock *sk);

int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);

#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
{
        pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
               flen, proglen, pass, image);
        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC         BIT(15)

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
        BUG_ON(ftest->code & BPF_ANC);

        switch (ftest->code) {
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)     case SKF_AD_OFF + SKF_AD_##CODE:        \
                                return BPF_ANC | SKF_AD_##CODE
                switch (ftest->k) {
                BPF_ANCILLARY(PROTOCOL);
                BPF_ANCILLARY(PKTTYPE);
                BPF_ANCILLARY(IFINDEX);
                BPF_ANCILLARY(NLATTR);
                BPF_ANCILLARY(NLATTR_NEST);
                BPF_ANCILLARY(MARK);
                BPF_ANCILLARY(QUEUE);
                BPF_ANCILLARY(HATYPE);
                BPF_ANCILLARY(RXHASH);
                BPF_ANCILLARY(CPU);
                BPF_ANCILLARY(ALU_XOR_X);
                BPF_ANCILLARY(VLAN_TAG);
                BPF_ANCILLARY(VLAN_TAG_PRESENT);
                BPF_ANCILLARY(PAY_OFFSET);
                BPF_ANCILLARY(RANDOM);
                }
                /* Fallthrough. */
        default:
                return ftest->code;
        }
}

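/* Example (illustrative): a classic insn BPF_LD | BPF_W | BPF_ABS with
 * k = SKF_AD_OFF + SKF_AD_IFINDEX is not a real packet load; the helper
 * returns BPF_ANC | SKF_AD_IFINDEX so callers can dispatch on the
 * ancillary operation, while ordinary loads fall through unchanged.
 */
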
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
                                     unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);

        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

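/* Note (added for clarity): a non-negative k is a plain packet offset,
 * served by skb_header_pointer() with 'buffer' as scratch space for
 * non-linear skbs; a negative k selects the special SKF_NET_OFF/SKF_LL_OFF
 * ranges handled by the out-of-line helper, which returns NULL when the
 * requested bytes are unavailable.
 */
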
static inline int bpf_tell_extensions(void)
{
        return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */