/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

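/* Illustrative example (added commentary, not part of the original
 * file): for an instruction like BPF_ALU64 | BPF_ADD | BPF_X with
 * dst_reg == 1 and src_reg == 2, a handler statement "DST = DST + SRC"
 * below expands via the macros above to "regs[1] = regs[1] + regs[2]".
 */
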
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

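/* Illustrative note (added commentary): classic BPF encodes loads
 * relative to the network or link layer header via the magic offsets
 * SKF_NET_OFF and SKF_LL_OFF; e.g. a load at SKF_NET_OFF + 9 would
 * fetch the IPv4 protocol field regardless of where skb->data
 * currently points, which is what the helper above resolves.
 */
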
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
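
/* Note (added commentary): the allocation above is page-rounded and
 * zeroed via __GFP_ZERO, so callers can fill fp->insnsi directly. An
 * allocation whose later setup fails is undone with __bpf_prog_free(),
 * while a fully set up program goes through bpf_prog_free() at the end
 * of this file.
 */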

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
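
/* Illustrative layout note (added commentary): the image region handed
 * back via *image_ptr starts at a random, alignment-masked offset
 * inside the first page, so the JITed program is preceded and followed
 * by a hole that bpf_fill_ill_insns() above pre-filled with
 * architecture-dependent illegal instructions.
 */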

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

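/* Illustrative note (added commentary): helper calls are encoded
 * relative to this base. The JMP_CALL handler in __bpf_prog_run()
 * below computes the target as __bpf_call_base + insn->imm, so a
 * resolved helper fn is expected to have been patched in as
 * imm = fn - __bpf_call_base.
 */
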
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
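
/* Added note: CONT and CONT_JMP currently expand to the same dispatch
 * sequence; keeping a distinct macro for taken jumps presumably leaves
 * room to tune jump dispatch (e.g. for branch prediction) without
 * touching the fall-through path.
 */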

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
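
	/* Added expansion example: ALU(ADD, +) above emits four labelled
	 * handlers; the 64-bit register/register form becomes
	 *
	 *	ALU64_ADD_X:
	 *		DST = DST + SRC;
	 *		CONT;
	 *
	 * plus the 32-bit variant with (u32) truncation and the two
	 * immediate (_K) forms using IMM instead of SRC.
	 */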
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		/* The 64-bit immediate spans two insns; insn++ consumes
		 * the second half holding the upper 32 bits.
		 */
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		/* Division or modulus by zero aborts the program with
		 * return value 0.
		 */
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (unlikely(!prog))
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
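
	/* Added note: a tail call never returns to the caller; on success
	 * the interpreter restarts the dispatch loop at the callee's first
	 * instruction, reusing the current stack and register file. From a
	 * program's point of view (sketch, helper signature as in uapi),
	 * bpf_tail_call(ctx, &prog_array_map, index) falls through when the
	 * index is empty or out of range, as the out: label above shows.
	 */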
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, and the
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
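
	/* Added mapping example: a classic BPF instruction such as
	 * "ldh [12]" (load the Ethernet type field) is translated by
	 * bpf_convert_filter() into BPF_LD | BPF_ABS | BPF_H with
	 * imm32 == 12, which lands in the load_half handler below.
	 */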
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}
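
/* Added note: a prog array therefore only ever holds programs of a
 * single type and a single execution mode (all JITed or all
 * interpreted); the first program stored in the map decides, and later
 * insertions as well as tail-call sources must match it.
 */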

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the eBPF program; if no JIT is available, fall back to the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
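
/* Added usage sketch (kernel-internal, error handling elided; assumes
 * only the functions from this file plus bpf_prog_size() and the
 * BPF_PROG_RUN() macro from linux/filter.h):
 *
 *	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(len), 0);
 *
 *	memcpy(fp->insnsi, insns, len * sizeof(struct bpf_insn));
 *	fp->len = len;
 *	err = bpf_prog_select_runtime(fp);
 *	ret = BPF_PROG_RUN(fp, ctx);
 *	bpf_prog_free(fp);
 */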
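
/* Added note: freeing is deferred to a workqueue because bpf_prog_free()
 * may be invoked from contexts (e.g. RCU callbacks tearing down a socket
 * filter) where vfree()/module_memfree() of the program image must not
 * be called directly.
 */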
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	aux->prog = fp;
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
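
/* Added note: .func stays NULL on purpose; bpf_tail_call() is never
 * dispatched as an ordinary helper call. The interpreter implements it
 * inline via JMP_TAIL_CALL above and JITs emit their own sequence, so
 * this proto mainly feeds the verifier's argument type checking.
 */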

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}
772}