/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <linux/bpf.h>

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
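
/* Worked example (illustration only): the EMIT* helpers pack opcode bytes
 * into a u32 that emit_code() stores little-endian, so the arguments land
 * in the image in the order they are written. EMIT3(0x48, 0x89, 0xE5)
 * therefore emits the byte sequence 48 89 E5, i.e. 'mov rbp,rsp', exactly
 * as used in emit_prologue() below.
 */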

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
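
/* For example, X86_JE is the short form '74 rel8'; the far form used by
 * the conditional-jump emitter below is '0F 84 rel32', i.e. an extra 0x0F
 * byte followed by the opcode plus 0x10
 * (see EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset) in do_jit()).
 */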

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
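
/* Reading the macro above: non-negative classic BPF offsets pick the
 * *_positive_offset helper, negative offsets down to SKF_LL_OFF pick the
 * *_negative_offset helper, and anything below SKF_LL_OFF falls back to
 * the generic helper.
 */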

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[BPF_REG_AX] = 2, /* r10 temp register */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
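
/* Worked example (illustration only): EMIT_mov(BPF_REG_1, BPF_REG_2)
 * expands to EMIT3(add_2mod(0x48, ...), 0x89, add_2reg(0xC0, ...)).
 * Neither register is an "ereg", so the REX prefix stays 0x48, and the
 * ModRM byte is 0xC0 + reg2hex[BPF_REG_1] + (reg2hex[BPF_REG_2] << 3)
 * = 0xC0 + 7 + 0x30 = 0xF7, giving the bytes 48 89 F7, i.e. 'mov rdi,rsi'.
 */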

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define STACKSIZE \
	(MAX_BPF_STACK + \
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)
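
/* Frame layout below rbp, as set up by emit_prologue() (sketch derived
 * from the stores in that function; offsets are relative to rbp):
 *
 *   [-MAX_BPF_STACK .. 0)  BPF program stack (BPF_REG_FP maps to rbp)
 *   [-STACKSIZE + 32]      qword zeroed by the prologue; its upper dword
 *                          at [-STACKSIZE + 36] is used as tail_call_cnt
 *                          by emit_bpf_tail_call()
 *   [-STACKSIZE + 24]      saved r15
 *   [-STACKSIZE + 16]      saved r14
 *   [-STACKSIZE +  8]      saved r13
 *   [-STACKSIZE]           saved rbx
 */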

#define PROLOGUE_SIZE 48

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, STACKSIZE */
	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions,
	 * resetting rax register to 0 (xor on eax gets 0 extended), and
	 * moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
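
/* Note on OFFSET1/OFFSET2/OFFSET3 above: they are the byte distances from
 * each conditional jump to the out: label, including the indirect-jump
 * thunk accounted for by RETPOLINE_RAX_BPF_JIT_SIZE. The BUILD_BUG_ON()
 * checks at the end of emit_bpf_tail_call() verify them, so any change to
 * the emitted sequence must keep these constants in sync.
 */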


static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_skb_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out;
	}

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
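	/* addrs[i] holds the offset of the end of insn i in the JITed image;
	 * each do_jit() pass below refines these estimates, and branch
	 * offsets are resolved as addrs[i + insn->off] - addrs[i].
	 */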
1129 ctx.cleanup_addr = proglen;
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -07001130
Alexei Starovoitov3f7352b2015-05-22 15:42:55 -07001131 /* JITed image shrinks with every pass and the loop iterates
1132 * until the image stops shrinking. Very large bpf programs
1133 * may converge on the last pass. In such case do one more
1134 * pass to emit the final image
1135 */
Daniel Borkmannc9e30712018-03-07 22:10:01 +01001136 for (pass = 0; pass < 20 || image; pass++) {
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -07001137 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1138 if (proglen <= 0) {
1139 image = NULL;
1140 if (header)
Daniel Borkmann738cbe72014-09-08 08:04:47 +02001141 bpf_jit_binary_free(header);
Daniel Borkmann959a7572016-05-13 19:08:33 +02001142 prog = orig_prog;
1143 goto out_addrs;
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -07001144 }
Eric Dumazet0a148422011-04-20 09:27:32 +00001145 if (image) {
Alexei Starovoitove0ee9c12014-10-10 20:30:23 -07001146 if (proglen != oldproglen) {
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -07001147 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1148 proglen, oldproglen);
Daniel Borkmann959a7572016-05-13 19:08:33 +02001149 prog = orig_prog;
1150 goto out_addrs;
Alexei Starovoitove0ee9c12014-10-10 20:30:23 -07001151 }
Eric Dumazet0a148422011-04-20 09:27:32 +00001152 break;
1153 }
1154 if (proglen == oldproglen) {
Daniel Borkmann738cbe72014-09-08 08:04:47 +02001155 header = bpf_jit_binary_alloc(proglen, &image,
1156 1, jit_fill_hole);
Daniel Borkmann959a7572016-05-13 19:08:33 +02001157 if (!header) {
1158 prog = orig_prog;
1159 goto out_addrs;
1160 }
Eric Dumazet0a148422011-04-20 09:27:32 +00001161 }
1162 oldproglen = proglen;
Daniel Borkmannc9e30712018-03-07 22:10:01 +01001163 cond_resched();
Eric Dumazet0a148422011-04-20 09:27:32 +00001164 }
Daniel Borkmann79617802013-03-21 22:22:03 +01001165
Eric Dumazet0a148422011-04-20 09:27:32 +00001166 if (bpf_jit_enable > 1)
Daniel Borkmann485d6512015-07-30 12:42:48 +02001167 bpf_jit_dump(prog->len, proglen, pass + 1, image);
Eric Dumazet0a148422011-04-20 09:27:32 +00001168
1169 if (image) {
Eric Dumazet314beb92013-05-17 16:37:03 +00001170 bpf_flush_icache(header, image + proglen);
1171 set_memory_ro((unsigned long)header, header->pages);
Alexei Starovoitovf3c2af72014-05-13 19:50:45 -07001172 prog->bpf_func = (void *)image;
Daniel Borkmanna91263d2015-09-30 01:41:50 +02001173 prog->jited = 1;
Daniel Borkmannb55f6ca2017-01-07 00:26:33 +01001174 } else {
1175 prog = orig_prog;
Eric Dumazet0a148422011-04-20 09:27:32 +00001176 }
Daniel Borkmann959a7572016-05-13 19:08:33 +02001177
1178out_addrs:
Eric Dumazet0a148422011-04-20 09:27:32 +00001179 kfree(addrs);
Daniel Borkmann959a7572016-05-13 19:08:33 +02001180out:
1181 if (tmp_blinded)
1182 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1183 tmp : orig_prog);
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001184 return prog;
Eric Dumazet0a148422011-04-20 09:27:32 +00001185}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}