/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <linux/bpf.h>

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
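
/* Illustration only: EMIT3(0x48, 0x89, 0xE5) packs the REX.W prefix, opcode
 * and ModRM byte of 'mov rbp,rsp' into the u32 0x00E58948; emit_code() stores
 * it little-endian, so the bytes land in the image as 48 89 E5, and 'prog'
 * advances by three bytes.
 */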

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
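
/* For example, the short 'je rel8' (X86_JE, 0x74) becomes the near form
 * '0x0F 0x84 rel32' once 0x10 and the extra 0x0F prefix are added, which is
 * what the emit_cond_jmp code in do_jit() does for out-of-range targets.
 */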

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[BPF_REG_AX] = 2, /* r10 temp register */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
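
/* Worked example (illustration): EMIT_mov(BPF_REG_1, BPF_REG_2) expands to
 * EMIT3(add_2mod(0x48, dst, src), 0x89, add_2reg(0xC0, dst, src)). Neither
 * rdi nor rsi is an extended register, so the REX prefix stays 0x48, and the
 * ModRM byte is 0xC0 + reg2hex[BPF_REG_1] + (reg2hex[BPF_REG_2] << 3) =
 * 0xC0 + 7 + (6 << 3) = 0xF7, giving the bytes 48 89 F7, i.e. 'mov rdi, rsi'.
 */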

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE \
	(32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 37

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* all classic BPF filters use R6(rbx), save it */

	/* mov qword ptr [rbp+0],rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp+8],r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16],r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24],r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

	if (!ebpf_from_cbpf) {
		/* Clear the tail call counter (tail_call_cnt): for eBPF tail
		 * calls we need to reset the counter to 0. It's done in two
		 * instructions, resetting rax register to 0, and moving it
		 * to the counter location.
		 */

		/* xor eax, eax */
		EMIT2(0x31, 0xc0);
		/* mov qword ptr [rbp+32], rax */
		EMIT4(0x48, 0x89, 0x45, 32);

		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}

	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 43 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 32
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}


static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/* optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/* optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
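
/* Illustration: emit_mov_imm32(&prog, true, BPF_REG_0, -1) takes the
 * sign-propagating path and emits the 7-byte '48 C7 C0 FF FF FF FF'
 * (mov rax, -1), while the same call with sign_propagate == false emits the
 * 5-byte 'B8 FF FF FF FF' (mov eax, 0xffffffff), which zero-extends into rax.
 */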

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/* For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
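
/* Illustration: a BPF_LD | BPF_IMM | BPF_DW constant whose upper 32 bits are
 * zero (e.g. 0x12345678 loaded into rax) goes through the 5-byte
 * 'mov eax, imm32' path above; any constant with upper bits set takes the
 * 10-byte 'movabs' path (REX.W + B8+rd followed by the 8-byte immediate).
 */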

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/* b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
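			/* note: the div emitted below divides the combined
			 * rdx:rax (or edx:eax) value by the operand, so rdx
			 * has to be cleared first to get a plain unsigned
			 * divide of dst_reg
			 */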
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_pkt_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
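	/* Note: addrs[i] holds the offset of the *end* of insn i in the JITed
	 * image (do_jit() maintains it the same way), which is why branch
	 * offsets are computed as addrs[i + insn->off] - addrs[i]: x86
	 * relative jumps are taken relative to the next instruction.
	 */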
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				prog = orig_prog;
				goto out_addrs;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!prog->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}