/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_FP,
	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};

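/*
 * JIT state shared by both passes:
 * @prog:	 the eBPF program being compiled
 * @idx:	 index of the next A64 instruction to emit
 * @tmp_used:	 set when TMP_REG_1/TMP_REG_2 are used and must be saved
 * @body_offset: A64 index at which the program body starts (after the
 *		 prologue), used to compute the epilogue offset
 * @offset:	 per-BPF-instruction start offsets into the A64 image
 * @image:	 generated code; NULL during the initial sizing pass
 */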
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int body_offset;
	int *offset;
	u32 *image;
};

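/*
 * Emit one A64 instruction. During the initial sizing pass ctx->image
 * is NULL and only ctx->idx advances; A64 instructions are always
 * little-endian in memory, hence the cpu_to_le32().
 */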
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

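/*
 * Load an arbitrary 64-bit immediate: MOVZ for the low 16 bits, then
 * one MOVK for each remaining non-zero 16-bit chunk (at most four
 * instructions in total).
 */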
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

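/*
 * Load a 32-bit immediate in at most two instructions: when the upper
 * half has its sign bit set, start from MOVN so that (for is64) the
 * sign extension into the top 32 bits comes for free; otherwise use
 * MOVZ, plus a MOVK when the upper half is non-zero.
 */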
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

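/*
 * A64 branch offset, in instructions, from BPF instruction bpf_from to
 * BPF instruction bpf_to. ctx->offset[i] records where instruction i
 * starts in the A64 image; a BPF jump offset is relative to the
 * instruction following the jump, hence offset[bpf_to + 1] for the
 * target, while offset[bpf_from + 1] - 1 is the branch instruction
 * itself, which A64 branches are relative to.
 */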
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to + 1];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from + 1] - 1;

	return to - from;
}

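/*
 * Fill unused space in the JIT image with BRK instructions so that a
 * stray jump into it traps rather than executing leftover bytes.
 */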
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->offset[ctx->prog->len - 1];
	int from = ctx->idx - ctx->body_offset;

	return to - from;
}

/* Stack must be a multiple of 16 bytes */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

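/*
 * The prologue below builds the following frame (higher addresses
 * first):
 *
 *	saved r6/r7 pair
 *	saved r8/r9 pair
 *	saved tmp1/tmp2 pair (only when the temporaries are used)
 *	BPF program stack, STACK_ALIGN(MAX_BPF_STACK + 4) bytes
 *	<- A64_SP, with A64_FP pointing here as well
 */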
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Set up BPF stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Set up frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Clear registers A and X */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		ctx->tmp_used = 1;
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MUL(is64, tmp, tmp, src), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			break;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			break;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

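/* Reject immediates that do not fit in the branch instruction's signed
 * immediate field: 19 bits for conditional branches and CBZ, 26 bits
 * for unconditional branches.
 */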
#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
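		/* Set up a frame record (FP, LR) around the helper call */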
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
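		/* The last instruction simply falls through to the epilogue */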
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		goto notyet;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

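		/* A NULL return from bpf_load_pointer() means the load
		 * failed: bail out through the epilogue with R0 == 0.
		 */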
		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
	pr_info_once("*** NOT YET: opcode %02x ***\n", code);
	return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx and fill in ctx->offset. */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	ctx.body_offset = ctx.idx;
	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = true;
out:
	kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
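	/* bpf_jit_binary_alloc() placed the image within a page-aligned
	 * allocation, so masking bpf_func with PAGE_MASK recovers the
	 * binary header.
	 */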
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}