blob: 20c146d1251ae2cd6c07279bf371adae6b2e3a1e [file] [log] [blame]
/*
 * BPF Jit compiler for s390.
 *
 * Minimum build requirements:
 *
 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
 *  - PACK_STACK
 *  - 64BIT
 *
 * Copyright IBM Corp. 2012,2015
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
Michael Holzheu05462312015-04-01 16:08:32 +020017
18#define KMSG_COMPONENT "bpf_jit"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020021#include <linux/netdevice.h>
22#include <linux/filter.h>
Heiko Carstensc9a7afa2013-07-17 14:26:50 +020023#include <linux/init.h>
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020024#include <asm/cacheflush.h>
Heiko Carstens0f208222013-09-13 13:36:25 +020025#include <asm/dis.h>
Michael Holzheu05462312015-04-01 16:08:32 +020026#include "bpf_jit.h"
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020027
/* JIT on/off switch; presumably driven by the bpf_jit_enable sysctl — confirm against net/core */
int bpf_jit_enable __read_mostly;
29
/*
 * JIT compilation state, carried through the sizing and code
 * generation passes (prg_buf is NULL while only computing sizes).
 */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program (NULL during sizing pass) */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit_start;		/* Start of literal pool */
	int lit;		/* Current position in literal pool */
	int base_ip;		/* Base address for literal pool */
	int ret0_ip;		/* Address of return 0 */
	int exit_ip;		/* Address of exit */
};
44
#define BPF_SIZE_MAX	4096	/* Max size for program */

/* Bits for bpf_jit.seen, recording which features the program uses */
#define SEEN_SKB	1	/* skb access */
#define SEEN_MEM	2	/* use mem[] for temporary storage */
#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	8	/* code uses literals */
#define SEEN_FUNC	16	/* calls C functions */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)	/* any of these needs a stack frame */
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020053
/*
 * s390 registers
 *
 * Backend-internal register identifiers, numbered directly after the
 * eBPF register numbers so that they can share the reg2hex[] mapping.
 */
#define REG_W0		(__MAX_BPF_REG+0)	/* Work register 1 (even) */
#define REG_W1		(__MAX_BPF_REG+1)	/* Work register 2 (odd) */
#define REG_SKB_DATA	(__MAX_BPF_REG+2)	/* SKB data register */
#define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
#define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020065
/*
 * Mapping of BPF registers to s390 registers
 *
 * Indexed by eBPF register number (plus the backend-internal REG_*
 * identifiers above); the value is the s390 GPR number.
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* SKB data pointer */
	[REG_SKB_DATA]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
Martin Schwidefskyc10302e2012-07-31 16:23:59 +020093
Michael Holzheu05462312015-04-01 16:08:32 +020094static inline u32 reg(u32 dst_reg, u32 src_reg)
Daniel Borkmann738cbe72014-09-08 08:04:47 +020095{
Michael Holzheu05462312015-04-01 16:08:32 +020096 return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
97}
98
99static inline u32 reg_high(u32 reg)
100{
101 return reg2hex[reg] << 4;
102}
103
104static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
105{
106 u32 r1 = reg2hex[b1];
107
108 if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
109 jit->seen_reg[r1] = 1;
110}
111
/*
 * Record that the s390 register backing "b1" is used.  Like all EMIT*
 * helpers below, this expects a local "struct bpf_jit *jit" in scope.
 */
#define REG_SET_SEEN(b1)				\
({							\
	reg_set_seen(jit, b1);				\
})

/* Non-zero if the s390 register backing "b1" was marked as used */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
118
/*
 * EMIT macros for code generation
 *
 * All EMIT* macros rely on a local "struct bpf_jit *jit" being in
 * scope.  When jit->prg_buf is NULL nothing is written; jit->prg is
 * still advanced so that instruction offsets and total size can be
 * computed in a dry run.
 */

/* Emit a 2 byte instruction (and advance jit->prg by 2) */
#define _EMIT2(op)					\
({							\
	if (jit->prg_buf)				\
		*(u16 *) (jit->prg_buf + jit->prg) = op; \
	jit->prg += 2;					\
})

/* Emit a 2 byte instruction with register pair (b1,b2) */
#define EMIT2(op, b1, b2)				\
({							\
	_EMIT2(op | reg(b1, b2));			\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
})
136
/* Emit a 4 byte instruction (and advance jit->prg by 4) */
#define _EMIT4(op)					\
({							\
	if (jit->prg_buf)				\
		*(u32 *) (jit->prg_buf + jit->prg) = op; \
	jit->prg += 4;					\
})

/* Emit a 4 byte instruction with register pair (b1,b2) */
#define EMIT4(op, b1, b2)				\
({							\
	_EMIT4(op | reg(b1, b2));			\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
})

/* Emit a 4 byte RRF-format instruction with a third register b3 */
#define EMIT4_RRF(op, b1, b2, b3)			\
({							\
	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));	\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
	REG_SET_SEEN(b3);				\
})
158
/* Emit a 4 byte instruction with a 12-bit displacement */
#define _EMIT4_DISP(op, disp)				\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	_EMIT4(op | __disp);				\
})

/* Emit a 4 byte instruction with two registers and a displacement */
#define EMIT4_DISP(op, b1, b2, disp)			\
({							\
	_EMIT4_DISP(op | reg_high(b1) << 16 |		\
		    reg_high(b2) << 8, disp);		\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
})

/* Emit a 4 byte instruction with a 16-bit immediate */
#define EMIT4_IMM(op, b1, imm)				\
({							\
	unsigned int __imm = (imm) & 0xffff;		\
	_EMIT4(op | reg_high(b1) << 16 | __imm);	\
	REG_SET_SEEN(b1);				\
})

/* Emit a 4 byte instruction with a PC-relative offset (in halfwords) */
#define EMIT4_PCREL(op, pcrel)				\
({							\
	long __pcrel = ((pcrel) >> 1) & 0xffff;		\
	_EMIT4(op | __pcrel);				\
})
185
/* Emit a 6 byte instruction (4 byte op1 followed by 2 byte op2) */
#define _EMIT6(op1, op2)				\
({							\
	if (jit->prg_buf) {				\
		*(u32 *) (jit->prg_buf + jit->prg) = op1; \
		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \
	}						\
	jit->prg += 6;					\
})

/* Emit a 6 byte instruction with a 12-bit displacement */
#define _EMIT6_DISP(op1, op2, disp)			\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	_EMIT6(op1 | __disp, op2);			\
})

/* Emit a 6 byte instruction with three registers and a displacement */
#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)		\
({							\
	_EMIT6_DISP(op1 | reg(b1, b2) << 16 |		\
		    reg_high(b3) << 8, op2, disp);	\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
	REG_SET_SEEN(b3);				\
})

/*
 * Emit a 6 byte instruction with a 20-bit displacement, split into the
 * low 12 bits (in op1) and the high 8 bits (in op2).
 */
#define _EMIT6_DISP_LH(op1, op2, disp)			\
({							\
	unsigned int __disp_h = ((u32)disp) & 0xff000;	\
	unsigned int __disp_l = ((u32)disp) & 0x00fff;	\
	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);	\
})

/* Emit a 6 byte instruction with registers and a 20-bit displacement */
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)	\
({							\
	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |	\
		       reg_high(b3) << 8, op2, disp);	\
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
	REG_SET_SEEN(b3);				\
})

/*
 * Emit a 6 byte compare-and-branch targeting eBPF instruction i+off+1,
 * using the per-instruction address table "addrs" (local to the caller).
 */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)	\
({							\
	/* Branch instruction needs 6 bytes */		\
	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2; \
	_EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask); \
	REG_SET_SEEN(b1);				\
	REG_SET_SEEN(b2);				\
})

/* Emit a 6 byte instruction with a 32-bit immediate (split 16/16) */
#define _EMIT6_IMM(op, imm)				\
({							\
	unsigned int __imm = (imm);			\
	_EMIT6(op | (__imm >> 16), __imm & 0xffff);	\
})

/* Emit a 6 byte instruction with register b1 and a 32-bit immediate */
#define EMIT6_IMM(op, b1, imm)				\
({							\
	_EMIT6_IMM(op | reg_high(b1) << 16, imm);	\
	REG_SET_SEEN(b1);				\
})
246
/*
 * Append a u32 constant to the literal pool.
 *
 * Evaluates to the displacement of the constant relative to the
 * literal pool base (jit->base_ip).  On sizing passes (no prg_buf)
 * nothing is stored, but the bookkeeping still happens so that all
 * passes compute identical offsets.
 *
 * Note: "val" is parenthesized before the cast — the old "(u32) val"
 * would mis-bind for compound arguments such as "a + b".
 */
#define EMIT_CONST_U32(val)				\
({							\
	unsigned int ret;				\
	ret = jit->lit - jit->base_ip;			\
	jit->seen |= SEEN_LITERAL;			\
	if (jit->prg_buf)				\
		*(u32 *) (jit->prg_buf + jit->lit) = (u32)(val); \
	jit->lit += 4;					\
	ret;						\
})

/* Append a u64 constant to the literal pool; see EMIT_CONST_U32 */
#define EMIT_CONST_U64(val)				\
({							\
	unsigned int ret;				\
	ret = jit->lit - jit->base_ip;			\
	jit->seen |= SEEN_LITERAL;			\
	if (jit->prg_buf)				\
		*(u64 *) (jit->prg_buf + jit->lit) = (u64)(val); \
	jit->lit += 8;					\
	ret;						\
})
268
/*
 * Zero-extend the 32-bit value in register "b1" to 64 bit.
 * EMIT4() already records "b1" via REG_SET_SEEN(), so the extra
 * REG_SET_SEEN(b1) the macro used to do was redundant and is dropped.
 */
#define EMIT_ZERO(b1)					\
({							\
	/* llgfr %dst,%dst (zero extend to 64 bit) */	\
	EMIT4(0xb9160000, b1, b1);			\
})
275
/*
 * Fill whole space with illegal instructions
 *
 * The area is simply cleared; a zeroed halfword is presumed to be an
 * invalid opcode on s390 — confirm against the architecture manual.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0, size);
}
283
/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 *
 * Registers r6-r15 go to the save area at offset 72 + (reg - 6) * 8 of
 * the caller's frame; %r15 has not been lowered yet at this point
 * (see bpf_jit_prologue() and the stack layout in "bpf_jit.h").
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = 72 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
298
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 *
 * If a JIT stack frame was allocated (SEEN_STACK, see
 * bpf_jit_prologue()), %r15 still points STK_OFF below the original
 * frame when this runs, so the displacement is biased by STK_OFF.
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = 72 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
316
317/*
318 * Return first seen register (from start)
319 */
320static int get_start(struct bpf_jit *jit, int start)
321{
322 int i;
323
324 for (i = start; i <= 15; i++) {
325 if (jit->seen_reg[i])
326 return i;
327 }
328 return 0;
329}
330
331/*
332 * Return last seen register (from start) (gap >= 2)
333 */
334static int get_end(struct bpf_jit *jit, int start)
335{
336 int i;
337
338 for (i = start; i < 15; i++) {
339 if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
340 return i - 1;
341 }
342 return jit->seen_reg[15] ? 15 : 14;
343}
344
345#define REGS_SAVE 1
346#define REGS_RESTORE 0
347/*
348 * Save and restore clobbered registers (6-15) on stack.
349 * We save/restore registers in chunks with gap >= 2 registers.
350 */
351static void save_restore_regs(struct bpf_jit *jit, int op)
352{
353
354 int re = 6, rs;
355
356 do {
357 rs = get_start(jit, re);
358 if (!rs)
359 break;
360 re = get_end(jit, rs + 1);
361 if (op == REGS_SAVE)
362 save_regs(jit, rs, re);
363 else
364 restore_regs(jit, rs, re);
365 re++;
366 } while (re <= 15);
367}
368
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit)
{
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE);
	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %l,0 (%l is the literal pool register, r11 per reg2hex) */
		EMIT2(0x0d00, REG_L, REG_0);
		/* Literal displacements are relative to this point */
		jit->base_ip = jit->prg;
	}
	/* Setup stack and backchain */
	if (jit->seen & SEEN_STACK) {
		/* lgr %bfp,%r15 (BPF frame pointer) */
		EMIT4(0xb9040000, BPF_REG_FP, REG_15);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
		if (jit->seen & SEEN_FUNC)
			/* stg %bfp,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
				      REG_15, 152);
	}
	/*
	 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
	 * we store the SKB header length on the stack and the SKB data
	 * pointer in REG_SKB_DATA.
	 */
	if (jit->seen & SEEN_SKB) {
		/* Header length: llgf %w1,<len>(%b1) */
		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
			      offsetof(struct sk_buff, len));
		/* s %w1,<data_len>(%b1) (header len = len - data_len) */
		EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
			   offsetof(struct sk_buff, data_len));
		/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
			      STK_OFF_HLEN);
		/* lg %skb_data,data_off(%b1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
			      BPF_REG_1, offsetof(struct sk_buff, data));
	}
	/* BPF compatibility: clear A (%b7) and X (%b8) registers */
	if (REG_SEEN(BPF_REG_7))
		/* lghi %b7,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
	if (REG_SEEN(BPF_REG_8))
		/* lghi %b8,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
}
423
/*
 * Function epilogue
 *
 * Emits the "return 0" shortcut target (only when some instruction
 * branched to it, i.e. SEEN_RET0), the common exit path and the
 * register restore.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %b0,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
	}
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers (%r15 is reloaded too when marked seen) */
	save_restore_regs(jit, REGS_RESTORE);
	/* br %r14 */
	_EMIT2(0x07fe);
}
443
444/*
Michael Holzheu05462312015-04-01 16:08:32 +0200445 * Compile one eBPF instruction into s390x code
Michael Holzheub9b4b1c2015-04-29 18:45:03 +0200446 *
447 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
448 * stack space for the large switch statement.
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200449 */
Michael Holzheub9b4b1c2015-04-29 18:45:03 +0200450static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200451{
Michael Holzheu05462312015-04-01 16:08:32 +0200452 struct bpf_insn *insn = &fp->insnsi[i];
453 int jmp_off, last, insn_count = 1;
454 unsigned int func_addr, mask;
455 u32 dst_reg = insn->dst_reg;
456 u32 src_reg = insn->src_reg;
457 u32 *addrs = jit->addrs;
458 s32 imm = insn->imm;
459 s16 off = insn->off;
460
461 switch (insn->code) {
462 /*
463 * BPF_MOV
464 */
465 case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
466 /* llgfr %dst,%src */
467 EMIT4(0xb9160000, dst_reg, src_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200468 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200469 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
470 /* lgr %dst,%src */
471 EMIT4(0xb9040000, dst_reg, src_reg);
472 break;
473 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
474 /* llilf %dst,imm */
475 EMIT6_IMM(0xc00f0000, dst_reg, imm);
476 break;
477 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
478 /* lgfi %dst,imm */
479 EMIT6_IMM(0xc0010000, dst_reg, imm);
480 break;
481 /*
482 * BPF_LD 64
483 */
484 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
485 {
486 /* 16 byte instruction that uses two 'struct bpf_insn' */
487 u64 imm64;
488
489 imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
490 /* lg %dst,<d(imm)>(%l) */
491 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
492 EMIT_CONST_U64(imm64));
493 insn_count = 2;
494 break;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200495 }
Michael Holzheu05462312015-04-01 16:08:32 +0200496 /*
497 * BPF_ADD
498 */
499 case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
500 /* ar %dst,%src */
501 EMIT2(0x1a00, dst_reg, src_reg);
502 EMIT_ZERO(dst_reg);
503 break;
504 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
505 /* agr %dst,%src */
506 EMIT4(0xb9080000, dst_reg, src_reg);
507 break;
508 case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
509 if (!imm)
510 break;
511 /* alfi %dst,imm */
512 EMIT6_IMM(0xc20b0000, dst_reg, imm);
513 EMIT_ZERO(dst_reg);
514 break;
515 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
516 if (!imm)
517 break;
518 /* agfi %dst,imm */
519 EMIT6_IMM(0xc2080000, dst_reg, imm);
520 break;
521 /*
522 * BPF_SUB
523 */
524 case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
525 /* sr %dst,%src */
526 EMIT2(0x1b00, dst_reg, src_reg);
527 EMIT_ZERO(dst_reg);
528 break;
529 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
530 /* sgr %dst,%src */
531 EMIT4(0xb9090000, dst_reg, src_reg);
532 break;
533 case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
534 if (!imm)
535 break;
536 /* alfi %dst,-imm */
537 EMIT6_IMM(0xc20b0000, dst_reg, -imm);
538 EMIT_ZERO(dst_reg);
539 break;
540 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
541 if (!imm)
542 break;
543 /* agfi %dst,-imm */
544 EMIT6_IMM(0xc2080000, dst_reg, -imm);
545 break;
546 /*
547 * BPF_MUL
548 */
549 case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
550 /* msr %dst,%src */
551 EMIT4(0xb2520000, dst_reg, src_reg);
552 EMIT_ZERO(dst_reg);
553 break;
554 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
555 /* msgr %dst,%src */
556 EMIT4(0xb90c0000, dst_reg, src_reg);
557 break;
558 case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
559 if (imm == 1)
560 break;
561 /* msfi %r5,imm */
562 EMIT6_IMM(0xc2010000, dst_reg, imm);
563 EMIT_ZERO(dst_reg);
564 break;
565 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
566 if (imm == 1)
567 break;
568 /* msgfi %dst,imm */
569 EMIT6_IMM(0xc2000000, dst_reg, imm);
570 break;
571 /*
572 * BPF_DIV / BPF_MOD
573 */
574 case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
575 case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
576 {
577 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200578
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200579 jit->seen |= SEEN_RET0;
Michael Holzheu05462312015-04-01 16:08:32 +0200580 /* ltr %src,%src (if src == 0 goto fail) */
581 EMIT2(0x1200, src_reg, src_reg);
582 /* jz <ret0> */
583 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
584 /* lhi %w0,0 */
585 EMIT4_IMM(0xa7080000, REG_W0, 0);
586 /* lr %w1,%dst */
587 EMIT2(0x1800, REG_W1, dst_reg);
588 /* dlr %w0,%src */
589 EMIT4(0xb9970000, REG_W0, src_reg);
590 /* llgfr %dst,%rc */
591 EMIT4(0xb9160000, dst_reg, rc_reg);
592 break;
593 }
Michael Holzheu771aada2015-04-27 11:12:25 +0200594 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
595 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
Michael Holzheu05462312015-04-01 16:08:32 +0200596 {
597 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
598
599 jit->seen |= SEEN_RET0;
600 /* ltgr %src,%src (if src == 0 goto fail) */
601 EMIT4(0xb9020000, src_reg, src_reg);
602 /* jz <ret0> */
603 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
604 /* lghi %w0,0 */
605 EMIT4_IMM(0xa7090000, REG_W0, 0);
606 /* lgr %w1,%dst */
607 EMIT4(0xb9040000, REG_W1, dst_reg);
Michael Holzheu05462312015-04-01 16:08:32 +0200608 /* dlgr %w0,%dst */
Michael Holzheu771aada2015-04-27 11:12:25 +0200609 EMIT4(0xb9870000, REG_W0, src_reg);
Michael Holzheu05462312015-04-01 16:08:32 +0200610 /* lgr %dst,%rc */
611 EMIT4(0xb9040000, dst_reg, rc_reg);
612 break;
613 }
614 case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
615 case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
616 {
617 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
618
619 if (imm == 1) {
620 if (BPF_OP(insn->code) == BPF_MOD)
621 /* lhgi %dst,0 */
622 EMIT4_IMM(0xa7090000, dst_reg, 0);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200623 break;
624 }
Michael Holzheu05462312015-04-01 16:08:32 +0200625 /* lhi %w0,0 */
626 EMIT4_IMM(0xa7080000, REG_W0, 0);
627 /* lr %w1,%dst */
628 EMIT2(0x1800, REG_W1, dst_reg);
629 /* dl %w0,<d(imm)>(%l) */
630 EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
631 EMIT_CONST_U32(imm));
632 /* llgfr %dst,%rc */
633 EMIT4(0xb9160000, dst_reg, rc_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200634 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200635 }
Michael Holzheu771aada2015-04-27 11:12:25 +0200636 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
637 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
Michael Holzheu05462312015-04-01 16:08:32 +0200638 {
639 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
640
641 if (imm == 1) {
642 if (BPF_OP(insn->code) == BPF_MOD)
643 /* lhgi %dst,0 */
644 EMIT4_IMM(0xa7090000, dst_reg, 0);
645 break;
646 }
647 /* lghi %w0,0 */
648 EMIT4_IMM(0xa7090000, REG_W0, 0);
649 /* lgr %w1,%dst */
650 EMIT4(0xb9040000, REG_W1, dst_reg);
651 /* dlg %w0,<d(imm)>(%l) */
652 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
Michael Holzheu771aada2015-04-27 11:12:25 +0200653 EMIT_CONST_U64(imm));
Michael Holzheu05462312015-04-01 16:08:32 +0200654 /* lgr %dst,%rc */
655 EMIT4(0xb9040000, dst_reg, rc_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200656 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200657 }
658 /*
659 * BPF_AND
660 */
661 case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
662 /* nr %dst,%src */
663 EMIT2(0x1400, dst_reg, src_reg);
664 EMIT_ZERO(dst_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200665 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200666 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
667 /* ngr %dst,%src */
668 EMIT4(0xb9800000, dst_reg, src_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200669 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200670 case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
671 /* nilf %dst,imm */
672 EMIT6_IMM(0xc00b0000, dst_reg, imm);
673 EMIT_ZERO(dst_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200674 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200675 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
676 /* ng %dst,<d(imm)>(%l) */
677 EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
678 EMIT_CONST_U64(imm));
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200679 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200680 /*
681 * BPF_OR
682 */
683 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
684 /* or %dst,%src */
685 EMIT2(0x1600, dst_reg, src_reg);
686 EMIT_ZERO(dst_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200687 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200688 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
689 /* ogr %dst,%src */
690 EMIT4(0xb9810000, dst_reg, src_reg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200691 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200692 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
693 /* oilf %dst,imm */
694 EMIT6_IMM(0xc00d0000, dst_reg, imm);
695 EMIT_ZERO(dst_reg);
696 break;
697 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
698 /* og %dst,<d(imm)>(%l) */
699 EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
700 EMIT_CONST_U64(imm));
701 break;
702 /*
703 * BPF_XOR
704 */
705 case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
706 /* xr %dst,%src */
707 EMIT2(0x1700, dst_reg, src_reg);
708 EMIT_ZERO(dst_reg);
709 break;
710 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
711 /* xgr %dst,%src */
712 EMIT4(0xb9820000, dst_reg, src_reg);
713 break;
714 case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
715 if (!imm)
716 break;
717 /* xilf %dst,imm */
718 EMIT6_IMM(0xc0070000, dst_reg, imm);
719 EMIT_ZERO(dst_reg);
720 break;
721 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
722 /* xg %dst,<d(imm)>(%l) */
723 EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
724 EMIT_CONST_U64(imm));
725 break;
726 /*
727 * BPF_LSH
728 */
729 case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
730 /* sll %dst,0(%src) */
731 EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
732 EMIT_ZERO(dst_reg);
733 break;
734 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
735 /* sllg %dst,%dst,0(%src) */
736 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
737 break;
738 case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
739 if (imm == 0)
740 break;
741 /* sll %dst,imm(%r0) */
742 EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
743 EMIT_ZERO(dst_reg);
744 break;
745 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
746 if (imm == 0)
747 break;
748 /* sllg %dst,%dst,imm(%r0) */
749 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
750 break;
751 /*
752 * BPF_RSH
753 */
754 case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
755 /* srl %dst,0(%src) */
756 EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
757 EMIT_ZERO(dst_reg);
758 break;
759 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
760 /* srlg %dst,%dst,0(%src) */
761 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
762 break;
763 case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
764 if (imm == 0)
765 break;
766 /* srl %dst,imm(%r0) */
767 EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
768 EMIT_ZERO(dst_reg);
769 break;
770 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
771 if (imm == 0)
772 break;
773 /* srlg %dst,%dst,imm(%r0) */
774 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
775 break;
776 /*
777 * BPF_ARSH
778 */
779 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
780 /* srag %dst,%dst,0(%src) */
781 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
782 break;
783 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
784 if (imm == 0)
785 break;
786 /* srag %dst,%dst,imm(%r0) */
787 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
788 break;
789 /*
790 * BPF_NEG
791 */
792 case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
793 /* lcr %dst,%dst */
794 EMIT2(0x1300, dst_reg, dst_reg);
795 EMIT_ZERO(dst_reg);
796 break;
797 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
798 /* lcgr %dst,%dst */
799 EMIT4(0xb9130000, dst_reg, dst_reg);
800 break;
801 /*
802 * BPF_FROM_BE/LE
803 */
804 case BPF_ALU | BPF_END | BPF_FROM_BE:
805 /* s390 is big endian, therefore only clear high order bytes */
806 switch (imm) {
807 case 16: /* dst = (u16) cpu_to_be16(dst) */
808 /* llghr %dst,%dst */
809 EMIT4(0xb9850000, dst_reg, dst_reg);
810 break;
811 case 32: /* dst = (u32) cpu_to_be32(dst) */
812 /* llgfr %dst,%dst */
813 EMIT4(0xb9160000, dst_reg, dst_reg);
814 break;
815 case 64: /* dst = (u64) cpu_to_be64(dst) */
816 break;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200817 }
818 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200819 case BPF_ALU | BPF_END | BPF_FROM_LE:
820 switch (imm) {
821 case 16: /* dst = (u16) cpu_to_le16(dst) */
822 /* lrvr %dst,%dst */
823 EMIT4(0xb91f0000, dst_reg, dst_reg);
824 /* srl %dst,16(%r0) */
825 EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
826 /* llghr %dst,%dst */
827 EMIT4(0xb9850000, dst_reg, dst_reg);
828 break;
829 case 32: /* dst = (u32) cpu_to_le32(dst) */
830 /* lrvr %dst,%dst */
831 EMIT4(0xb91f0000, dst_reg, dst_reg);
832 /* llgfr %dst,%dst */
833 EMIT4(0xb9160000, dst_reg, dst_reg);
834 break;
835 case 64: /* dst = (u64) cpu_to_le64(dst) */
836 /* lrvgr %dst,%dst */
837 EMIT4(0xb90f0000, dst_reg, dst_reg);
838 break;
839 }
840 break;
841 /*
842 * BPF_ST(X)
843 */
844 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
845 /* stcy %src,off(%dst) */
846 EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
847 jit->seen |= SEEN_MEM;
848 break;
849 case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
850 /* sthy %src,off(%dst) */
851 EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
852 jit->seen |= SEEN_MEM;
853 break;
854 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
855 /* sty %src,off(%dst) */
856 EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
857 jit->seen |= SEEN_MEM;
858 break;
859 case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
860 /* stg %src,off(%dst) */
861 EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
862 jit->seen |= SEEN_MEM;
863 break;
864 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
865 /* lhi %w0,imm */
866 EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
867 /* stcy %w0,off(dst) */
868 EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
869 jit->seen |= SEEN_MEM;
870 break;
871 case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
872 /* lhi %w0,imm */
873 EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
874 /* sthy %w0,off(dst) */
875 EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
876 jit->seen |= SEEN_MEM;
877 break;
878 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
879 /* llilf %w0,imm */
880 EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
881 /* sty %w0,off(%dst) */
882 EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
883 jit->seen |= SEEN_MEM;
884 break;
885 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
886 /* lgfi %w0,imm */
887 EMIT6_IMM(0xc0010000, REG_W0, imm);
888 /* stg %w0,off(%dst) */
889 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
890 jit->seen |= SEEN_MEM;
891 break;
892 /*
893 * BPF_STX XADD (atomic_add)
894 */
895 case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
896 /* laal %w0,%src,off(%dst) */
897 EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
898 dst_reg, off);
899 jit->seen |= SEEN_MEM;
900 break;
901 case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
902 /* laalg %w0,%src,off(%dst) */
903 EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
904 dst_reg, off);
905 jit->seen |= SEEN_MEM;
906 break;
907 /*
908 * BPF_LDX
909 */
910 case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
911 /* llgc %dst,0(off,%src) */
912 EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
913 jit->seen |= SEEN_MEM;
914 break;
915 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
916 /* llgh %dst,0(off,%src) */
917 EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
918 jit->seen |= SEEN_MEM;
919 break;
920 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
921 /* llgf %dst,off(%src) */
922 jit->seen |= SEEN_MEM;
923 EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
924 break;
925 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
926 /* lg %dst,0(off,%src) */
927 jit->seen |= SEEN_MEM;
928 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
929 break;
930 /*
931 * BPF_JMP / CALL
932 */
933 case BPF_JMP | BPF_CALL:
934 {
935 /*
936 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
937 */
938 const u64 func = (u64)__bpf_call_base + imm;
939
940 REG_SET_SEEN(BPF_REG_5);
941 jit->seen |= SEEN_FUNC;
942 /* lg %w1,<d(imm)>(%l) */
943 EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
944 EMIT_CONST_U64(func));
945 /* basr %r14,%w1 */
946 EMIT2(0x0d00, REG_14, REG_W1);
947 /* lgr %b0,%r2: load return value into %b0 */
948 EMIT4(0xb9040000, BPF_REG_0, REG_2);
949 break;
950 }
951 case BPF_JMP | BPF_EXIT: /* return b0 */
952 last = (i == fp->len - 1) ? 1 : 0;
953 if (last && !(jit->seen & SEEN_RET0))
954 break;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200955 /* j <exit> */
956 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
957 break;
Michael Holzheu05462312015-04-01 16:08:32 +0200958 /*
959 * Branch relative (number of skipped instructions) to offset on
960 * condition.
961 *
962 * Condition code to mask mapping:
963 *
964 * CC | Description | Mask
965 * ------------------------------
966 * 0 | Operands equal | 8
967 * 1 | First operand low | 4
968 * 2 | First operand high | 2
969 * 3 | Unused | 1
970 *
971 * For s390x relative branches: ip = ip + off_bytes
972 * For BPF relative branches: insn = insn + off_insns + 1
973 *
974 * For example for s390x with offset 0 we jump to the branch
975 * instruction itself (loop) and for BPF with offset 0 we
976 * branch to the instruction behind the branch.
977 */
978 case BPF_JMP | BPF_JA: /* if (true) */
979 mask = 0xf000; /* j */
980 goto branch_oc;
981 case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
982 mask = 0x2000; /* jh */
983 goto branch_ks;
984 case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
985 mask = 0xa000; /* jhe */
986 goto branch_ks;
987 case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
988 mask = 0x2000; /* jh */
989 goto branch_ku;
990 case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
991 mask = 0xa000; /* jhe */
992 goto branch_ku;
993 case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
994 mask = 0x7000; /* jne */
995 goto branch_ku;
996 case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
997 mask = 0x8000; /* je */
998 goto branch_ku;
999 case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1000 mask = 0x7000; /* jnz */
1001 /* lgfi %w1,imm (load sign extend imm) */
1002 EMIT6_IMM(0xc0010000, REG_W1, imm);
1003 /* ngr %w1,%dst */
1004 EMIT4(0xb9800000, REG_W1, dst_reg);
1005 goto branch_oc;
1006
1007 case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1008 mask = 0x2000; /* jh */
1009 goto branch_xs;
1010 case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1011 mask = 0xa000; /* jhe */
1012 goto branch_xs;
1013 case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1014 mask = 0x2000; /* jh */
1015 goto branch_xu;
1016 case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1017 mask = 0xa000; /* jhe */
1018 goto branch_xu;
1019 case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1020 mask = 0x7000; /* jne */
1021 goto branch_xu;
1022 case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1023 mask = 0x8000; /* je */
1024 goto branch_xu;
1025 case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1026 mask = 0x7000; /* jnz */
1027 /* ngrk %w1,%dst,%src */
1028 EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
1029 goto branch_oc;
1030branch_ks:
1031 /* lgfi %w1,imm (load sign extend imm) */
1032 EMIT6_IMM(0xc0010000, REG_W1, imm);
1033 /* cgrj %dst,%w1,mask,off */
1034 EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001035 break;
Michael Holzheu05462312015-04-01 16:08:32 +02001036branch_ku:
1037 /* lgfi %w1,imm (load sign extend imm) */
1038 EMIT6_IMM(0xc0010000, REG_W1, imm);
1039 /* clgrj %dst,%w1,mask,off */
1040 EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001041 break;
Michael Holzheu05462312015-04-01 16:08:32 +02001042branch_xs:
1043 /* cgrj %dst,%src,mask,off */
1044 EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001045 break;
Michael Holzheu05462312015-04-01 16:08:32 +02001046branch_xu:
1047 /* clgrj %dst,%src,mask,off */
1048 EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001049 break;
Michael Holzheu05462312015-04-01 16:08:32 +02001050branch_oc:
1051 /* brc mask,jmp_off (branch instruction needs 4 bytes) */
1052 jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
1053 EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001054 break;
Michael Holzheu05462312015-04-01 16:08:32 +02001055 /*
1056 * BPF_LD
1057 */
1058 case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
1059 case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
1060 if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1061 func_addr = __pa(sk_load_byte_pos);
1062 else
1063 func_addr = __pa(sk_load_byte);
1064 goto call_fn;
1065 case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
1066 case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
1067 if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1068 func_addr = __pa(sk_load_half_pos);
1069 else
1070 func_addr = __pa(sk_load_half);
1071 goto call_fn;
1072 case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
1073 case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
1074 if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1075 func_addr = __pa(sk_load_word_pos);
1076 else
1077 func_addr = __pa(sk_load_word);
1078 goto call_fn;
1079call_fn:
1080 jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
1081 REG_SET_SEEN(REG_14); /* Return address of possible func call */
1082
1083 /*
1084 * Implicit input:
1085 * BPF_REG_6 (R7) : skb pointer
1086 * REG_SKB_DATA (R12): skb data pointer
1087 *
1088 * Calculated input:
1089 * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb
1090 * BPF_REG_5 (R6) : return address
1091 *
1092 * Output:
1093 * BPF_REG_0 (R14): data read from skb
1094 *
1095 * Scratch registers (BPF_REG_1-5)
1096 */
1097
1098 /* Call function: llilf %w1,func_addr */
1099 EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
1100
1101 /* Offset: lgfi %b2,imm */
1102 EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
1103 if (BPF_MODE(insn->code) == BPF_IND)
1104 /* agfr %b2,%src (%src is s32 here) */
1105 EMIT4(0xb9180000, BPF_REG_2, src_reg);
1106
1107 /* basr %b5,%w1 (%b5 is call saved) */
1108 EMIT2(0x0d00, BPF_REG_5, REG_W1);
1109
1110 /*
1111 * Note: For fast access we jump directly after the
1112 * jnz instruction from bpf_jit.S
1113 */
1114 /* jnz <ret0> */
1115 EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001116 break;
1117 default: /* too complex, give up */
Michael Holzheu05462312015-04-01 16:08:32 +02001118 pr_err("Unknown opcode %02x\n", insn->code);
1119 return -1;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001120 }
Michael Holzheu05462312015-04-01 16:08:32 +02001121 return insn_count;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001122}
1123
Michael Holzheu05462312015-04-01 16:08:32 +02001124/*
1125 * Compile eBPF program into s390x code
1126 */
1127static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1128{
1129 int i, insn_count;
1130
1131 jit->lit = jit->lit_start;
1132 jit->prg = 0;
1133
1134 bpf_jit_prologue(jit);
1135 for (i = 0; i < fp->len; i += insn_count) {
1136 insn_count = bpf_jit_insn(jit, fp, i);
1137 if (insn_count < 0)
1138 return -1;
1139 jit->addrs[i + 1] = jit->prg; /* Next instruction address */
1140 }
1141 bpf_jit_epilogue(jit);
1142
1143 jit->lit_start = jit->prg;
1144 jit->size = jit->lit;
1145 jit->size_prg = jit->prg;
1146 return 0;
1147}
1148
1149/*
1150 * Classic BPF function stub. BPF programs will be converted into
1151 * eBPF and then bpf_int_jit_compile() will be called.
1152 */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001153void bpf_jit_compile(struct bpf_prog *fp)
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001154{
Michael Holzheu05462312015-04-01 16:08:32 +02001155}
1156
1157/*
1158 * Compile eBPF program "fp"
1159 */
1160void bpf_int_jit_compile(struct bpf_prog *fp)
1161{
1162 struct bpf_binary_header *header;
1163 struct bpf_jit jit;
1164 int pass;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001165
1166 if (!bpf_jit_enable)
1167 return;
Michael Holzheu05462312015-04-01 16:08:32 +02001168 memset(&jit, 0, sizeof(jit));
1169 jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1170 if (jit.addrs == NULL)
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001171 return;
Michael Holzheu05462312015-04-01 16:08:32 +02001172 /*
1173 * Three initial passes:
1174 * - 1/2: Determine clobbered registers
1175 * - 3: Calculate program size and addrs arrray
1176 */
1177 for (pass = 1; pass <= 3; pass++) {
1178 if (bpf_jit_prog(&jit, fp))
1179 goto free_addrs;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001180 }
Michael Holzheu05462312015-04-01 16:08:32 +02001181 /*
1182 * Final pass: Allocate and generate program
1183 */
1184 if (jit.size >= BPF_SIZE_MAX)
1185 goto free_addrs;
1186 header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1187 if (!header)
1188 goto free_addrs;
1189 if (bpf_jit_prog(&jit, fp))
1190 goto free_addrs;
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001191 if (bpf_jit_enable > 1) {
Michael Holzheu05462312015-04-01 16:08:32 +02001192 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1193 if (jit.prg_buf)
1194 print_fn_code(jit.prg_buf, jit.size_prg);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001195 }
Michael Holzheu05462312015-04-01 16:08:32 +02001196 if (jit.prg_buf) {
Heiko Carstensaa2d2c72013-07-16 13:25:49 +02001197 set_memory_ro((unsigned long)header, header->pages);
Michael Holzheu05462312015-04-01 16:08:32 +02001198 fp->bpf_func = (void *) jit.prg_buf;
Daniel Borkmann286aad32014-09-08 08:04:49 +02001199 fp->jited = true;
Heiko Carstensaa2d2c72013-07-16 13:25:49 +02001200 }
Michael Holzheu05462312015-04-01 16:08:32 +02001201free_addrs:
1202 kfree(jit.addrs);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001203}
1204
Michael Holzheu05462312015-04-01 16:08:32 +02001205/*
1206 * Free eBPF program
1207 */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001208void bpf_jit_free(struct bpf_prog *fp)
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001209{
Heiko Carstensaa2d2c72013-07-16 13:25:49 +02001210 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1211 struct bpf_binary_header *header = (void *)addr;
1212
Daniel Borkmannf8bbbfc2014-03-28 18:58:18 +01001213 if (!fp->jited)
Alexei Starovoitovd45ed4a2013-10-04 00:14:06 -07001214 goto free_filter;
Daniel Borkmannf8bbbfc2014-03-28 18:58:18 +01001215
Heiko Carstensaa2d2c72013-07-16 13:25:49 +02001216 set_memory_rw(addr, header->pages);
Daniel Borkmann738cbe72014-09-08 08:04:47 +02001217 bpf_jit_binary_free(header);
Daniel Borkmannf8bbbfc2014-03-28 18:58:18 +01001218
Alexei Starovoitovd45ed4a2013-10-04 00:14:06 -07001219free_filter:
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001220 bpf_prog_unlock_free(fp);
Martin Schwidefskyc10302e2012-07-31 16:23:59 +02001221}