/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
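
/* For example, EMIT3(0x48, 0x89, 0xE5) appends the bytes 48 89 e5
 * ("mov rbp,rsp") to the image: the byte arguments are packed into the u32
 * least-significant byte first, which is what emit_code() stores on this
 * little-endian architecture.
 */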

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
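
/* For example, X86_JE (0x74, jump if equal with an 8-bit displacement)
 * becomes the two-byte opcode 0x0F 0x84 with a 32-bit displacement;
 * emit_cond_jmp below picks the short or near form depending on whether
 * the offset fits in imm8.
 */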

static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
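
/* CHOOSE_LOAD_FUNC: a constant negative offset in the SKF_LL_OFF range
 * (link-layer/network-header relative loads) picks the *_negative_offset
 * helper, a non-negative constant picks the *_positive_offset fast path,
 * and anything else falls back to the generic helper, which presumably
 * validates the offset at run time (see bpf_jit.S).
 */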

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static inline bool is_ereg(u32 reg)
{
	if (reg == BPF_REG_5 || reg == AUX_REG ||
	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
		return true;
	else
		return false;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static inline u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static inline u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
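
/* For example, EMIT_mov(BPF_REG_0, BPF_REG_1) expands to
 * EMIT3(add_2mod(0x48, dst, src), 0x89, add_2reg(0xC0, dst, src)) and emits
 * 48 89 f8, i.e. "mov rax,rdi": the REX.W prefix stays 0x48 since neither
 * register needs an extension bit, and the ModRM byte is
 * 0xC0 + reg2hex[BPF_REG_0] + (reg2hex[BPF_REG_1] << 3) = 0xF8.
 */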
173
Daniel Borkmann738cbe72014-09-08 08:04:47 +0200174static void jit_fill_hole(void *area, unsigned int size)
175{
176 /* fill whole space with int3 instructions */
177 memset(area, 0xcc, size);
178}
179
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -0700180struct jit_context {
181 unsigned int cleanup_addr; /* epilogue code offset */
Alexei Starovoitov62258272014-05-13 19:50:46 -0700182 bool seen_ld_abs;
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -0700183};
184
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -0700185static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -0700186 int oldproglen, struct jit_context *ctx)
Eric Dumazet0a148422011-04-20 09:27:32 +0000187{
Alexei Starovoitov2695fb52014-07-24 16:38:21 -0700188 struct bpf_insn *insn = bpf_prog->insnsi;
Alexei Starovoitov62258272014-05-13 19:50:46 -0700189 int insn_cnt = bpf_prog->len;
Eric Dumazet0a148422011-04-20 09:27:32 +0000190 u8 temp[64];
Alexei Starovoitov62258272014-05-13 19:50:46 -0700191 int i;
192 int proglen = 0;
193 u8 *prog = temp;
194 int stacksize = MAX_BPF_STACK +
195 32 /* space for rbx, r13, r14, r15 */ +
196 8 /* space for skb_copy_bits() buffer */;
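
	/* Rough frame layout after the prologue below (BPF_REG_FP maps to
	 * rbp): the BPF program's MAX_BPF_STACK bytes sit directly below rbp,
	 * the four qwords at rbp-stacksize .. rbp-stacksize+24 hold the
	 * spilled callee-saved rbx/r13/r14/r15, and the remaining 8 bytes are
	 * scratch space for the sk_load_* slow path (skb_copy_bits()).
	 */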

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

	if (ctx->seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
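			/* e.g. BPF_ALU64 | BPF_ADD | BPF_X with dst_reg=R1,
			 * src_reg=R2 emits 48 01 f7, i.e. "add rdi,rsi"
			 */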
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
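			/* x86 div uses rdx:rax implicitly: the divisor is
			 * staged in r11, the dividend moved into rax with rdx
			 * zeroed, and the quotient (rax) or remainder (rdx)
			 * copied back into dst_reg; rax/rdx are saved and
			 * restored so the other mapped registers survive.
			 */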
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:		ctx->seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (i != insn_cnt - 1) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

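	/* do_jit() is run repeatedly: each pass recomputes jump offsets from
	 * the tighter addrs[] of the previous pass, so the image can only
	 * shrink. Once two consecutive passes report the same length, the
	 * final buffer is allocated and one more pass writes the code into it.
	 */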
	for (pass = 0; pass < 10; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
	}
out:
	kfree(addrs);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}