/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch; this is the slow path.
 *
 * Exported for the BPF JIT load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

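/* Usage sketch (illustrative; bpf_example_load_ip_proto is hypothetical and
 * not referenced anywhere): classic BPF reaches data relative to the network
 * header through the negative SKF_NET_OFF base, so the IPv4 protocol field
 * (byte 9 of the IP header) is loadable no matter how long the link-layer
 * header is. Loads like this are resolved through the helper above.
 */
static const struct sock_filter bpf_example_load_ip_proto __maybe_unused =
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9);
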
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_work_struct *ws;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
	if (ws == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->work = ws;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

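/* Allocation sketch (illustrative; bpf_example_prog_alloc is hypothetical):
 * a typical caller sizes the allocation by instruction count, here assuming
 * the bpf_prog_size() helper from linux/filter.h, and pairs the allocation
 * with bpf_prog_free() further below.
 */
static struct bpf_prog * __maybe_unused bpf_example_prog_alloc(unsigned int len)
{
	struct bpf_prog *fp;

	fp = bpf_prog_alloc(bpf_prog_size(len), 0);
	if (fp == NULL)
		return NULL;

	/* Number of instructions; the insnsi[] array itself still has
	 * to be filled in by the caller.
	 */
	fp->len = len;
	return fp;
}
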
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;

		/* We keep fp->work from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->work = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

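/* Ownership note for the realloc above: on success the old program has been
 * freed (its work item was moved over to the new copy) and must not be used
 * any more; on failure NULL is returned and fp_old stays valid and intact.
 */
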
void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->work);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

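/* Call-encoding sketch (illustrative; bpf_example_set_call is hypothetical):
 * a loader stores a helper's address as a 32-bit delta from __bpf_call_base,
 * which the interpreter's JMP_CALL handler below undoes by computing
 * __bpf_call_base + insn->imm.
 */
static void __maybe_unused bpf_example_set_call(struct bpf_insn *insn,
						u64 (*helper)(u64, u64, u64,
							      u64, u64))
{
	/* GNU C: arithmetic on function pointers works byte-wise. */
	insn->imm = (void *) helper - (void *) __bpf_call_base;
}
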
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: the data we are operating on
 *	@insn: the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
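
	/* Both continuation macros advance to the next instruction and
	 * re-enter the dispatch via the computed goto at select_insn;
	 * CONT_JMP is a separate name only so that taken branches could
	 * be annotated or instrumented differently later on.
	 */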

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
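
	/* do_div(n, base) divides n in place and returns the remainder;
	 * hence the MOD handlers below keep its return value while the
	 * DIV handlers discard it.
	 */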
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All such
		 * programs keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, and the
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

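/* End-to-end sketch (illustrative; bpf_example_run_const is hypothetical and
 * bypasses the verifier): build a two-instruction eBPF program that returns
 * the constant 42 and execute it through the runtime selected below. This
 * assumes the BPF_MOV64_IMM()/BPF_EXIT_INSN() insn macros from linux/filter.h.
 */
static unsigned int __maybe_unused bpf_example_run_const(void *ctx)
{
	struct bpf_prog *fp;
	unsigned int ret;

	fp = bpf_prog_alloc(bpf_prog_size(2), 0);
	if (fp == NULL)
		return 0;

	fp->len = 2;
	fp->insnsi[0] = BPF_MOV64_IMM(BPF_REG_0, 42);	/* r0 = 42 */
	fp->insnsi[1] = BPF_EXIT_INSN();		/* return r0 */

	bpf_prog_select_runtime(fp);
	ret = BPF_PROG_RUN(fp, ctx);	/* yields 42 */

	bpf_prog_free(fp);
	return ret;
}
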
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/**
 *	bpf_prog_select_runtime - select execution runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, select the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN() macro.
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
	/* Lock whole bpf_prog as read-only */
	bpf_prog_lock_ro(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

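/* JIT hook sketch (illustrative, not a real implementation): an architecture
 * JIT overrides the __weak bpf_int_jit_compile() above and, on success,
 * repoints fp->bpf_func at its generated image, roughly:
 *
 *	void bpf_int_jit_compile(struct bpf_prog *fp)
 *	{
 *		u8 *image = arch_emit_native_code(fp);	(hypothetical)
 *
 *		if (image) {
 *			fp->bpf_func = (void *) image;
 *			fp->jited = 1;
 *		}
 *	}
 */
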
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_work_struct *ws;

	ws = container_of(work, struct bpf_work_struct, work);
	bpf_jit_free(ws->prog);
}

/* Free internal BPF program. Teardown is deferred to a workqueue because
 * programs may be released from atomic context (e.g. an RCU callback),
 * while undoing the read-only mapping and vfree() need process context.
 */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_work_struct *ws = fp->work;

	INIT_WORK(&ws->work, bpf_prog_free_deferred);
	ws->prog = fp;
	schedule_work(&ws->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);