/*
 * BPF Jit compiler for s390.
 *
 * Copyright IBM Corp. 2012
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <linux/random.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/dis.h>

/*
 * Conventions:
 *   %r2 = skb pointer
 *   %r3 = offset parameter
 *   %r4 = scratch register / length parameter
 *   %r5 = BPF A accumulator
 *   %r8 = return address
 *   %r9 = save register for skb pointer
 *   %r10 = skb->data
 *   %r11 = skb->len - skb->data_len (headlen)
 *   %r12 = BPF X accumulator
 *   %r13 = literal pool pointer
 *   0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
 */
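
/*
 * bpf_jit_enable is the knob behind the net.core.bpf_jit_enable sysctl
 * (/proc/sys/net/core/bpf_jit_enable): 0 disables the JIT, 1 enables it,
 * and 2 additionally dumps the generated image to the kernel log.
 */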
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/s390/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];

struct bpf_jit {
	unsigned int seen;	/* Remembered SEEN_* flags */
	u8 *start;		/* Start of code buffer (NULL while sizing) */
	u8 *prg;		/* Current instruction emit position */
	u8 *mid;		/* End of instructions / start of literal pool */
	u8 *lit;		/* Current literal pool emit position */
	u8 *end;		/* End of literal pool */
	u8 *base_ip;		/* Base address loaded into %r13 */
	u8 *ret0_ip;		/* Address of the "return 0" code */
	u8 *exit_ip;		/* Address of the common exit code */
	/* Literal pool offsets of the sk_load_* helper addresses */
	unsigned int off_load_word;
	unsigned int off_load_half;
	unsigned int off_load_byte;
	unsigned int off_load_bmsh;
	unsigned int off_load_iword;
	unsigned int off_load_ihalf;
	unsigned int off_load_ibyte;
};
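
/*
 * Buffer layout: instructions are emitted between start and mid, literal
 * pool entries between mid and end.  During the early sizing passes these
 * pointers are all NULL, so the EMIT* macros store nothing and merely
 * advance prg and lit to measure the required image size.
 */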
#define BPF_SIZE_MAX	4096	/* Max size for program */

#define SEEN_DATAREF	1	/* might call external helpers */
#define SEEN_XREG	2	/* %r12 (BPF X accumulator) is used */
#define SEEN_MEM	4	/* use mem[] for temporary storage */
#define SEEN_RET0	8	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	16	/* code uses literals */
#define SEEN_LOAD_WORD	32	/* code uses sk_load_word */
#define SEEN_LOAD_HALF	64	/* code uses sk_load_half */
#define SEEN_LOAD_BYTE	128	/* code uses sk_load_byte */
#define SEEN_LOAD_BMSH	256	/* code uses sk_load_byte_msh */
#define SEEN_LOAD_IWORD	512	/* code uses sk_load_word_ind */
#define SEEN_LOAD_IHALF	1024	/* code uses sk_load_half_ind */
#define SEEN_LOAD_IBYTE	2048	/* code uses sk_load_byte_ind */

#define EMIT2(op)					\
({							\
	if (jit->prg + 2 <= jit->mid)			\
		*(u16 *) jit->prg = op;			\
	jit->prg += 2;					\
})

#define EMIT4(op)					\
({							\
	if (jit->prg + 4 <= jit->mid)			\
		*(u32 *) jit->prg = op;			\
	jit->prg += 4;					\
})

#define EMIT4_DISP(op, disp)				\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	EMIT4(op | __disp);				\
})

#define EMIT4_IMM(op, imm)				\
({							\
	unsigned int __imm = (imm) & 0xffff;		\
	EMIT4(op | __imm);				\
})

#define EMIT4_PCREL(op, pcrel)				\
({							\
	long __pcrel = ((pcrel) >> 1) & 0xffff;		\
	EMIT4(op | __pcrel);				\
})

#define EMIT6(op1, op2)					\
({							\
	if (jit->prg + 6 <= jit->mid) {			\
		*(u32 *) jit->prg = op1;		\
		*(u16 *) (jit->prg + 4) = op2;		\
	}						\
	jit->prg += 6;					\
})

#define EMIT6_DISP(op1, op2, disp)			\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	EMIT6(op1 | __disp, op2);			\
})

#define EMIT6_IMM(op, imm)				\
({							\
	unsigned int __imm = (imm);			\
	EMIT6(op | (__imm >> 16), __imm & 0xffff);	\
})

#define EMIT_CONST(val)					\
({							\
	unsigned int ret;				\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	jit->seen |= SEEN_LITERAL;			\
	if (jit->lit + 4 <= jit->end)			\
		*(u32 *) jit->lit = val;		\
	jit->lit += 4;					\
	ret;						\
})

#define EMIT_FN_CONST(bit, fn)				\
({							\
	unsigned int ret;				\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	if (jit->seen & bit) {				\
		jit->seen |= SEEN_LITERAL;		\
		if (jit->lit + 8 <= jit->end)		\
			*(void **) jit->lit = fn;	\
		jit->lit += 8;				\
	}						\
	ret;						\
})
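
/*
 * Both EMIT_CONST and EMIT_FN_CONST return the displacement of the new
 * literal relative to base_ip.  The prologue executes "basr %r13,0" at
 * base_ip, so the emitted code reaches every literal as <disp>(%r13).
 */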

static void bpf_jit_prologue(struct bpf_jit *jit)
{
	/* Save registers and create stack frame if necessary */
	if (jit->seen & SEEN_DATAREF) {
		/* stmg %r8,%r15,88(%r15) */
		EMIT6(0xeb8ff058, 0x0024);
		/* lgr %r14,%r15 */
		EMIT4(0xb90400ef);
		/* aghi %r15,<offset> */
		EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
		/* stg %r14,152(%r15) */
		EMIT6(0xe3e0f098, 0x0024);
	} else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
		/* stmg %r12,%r13,120(%r15) */
		EMIT6(0xebcdf078, 0x0024);
	else if (jit->seen & SEEN_XREG)
		/* stg %r12,120(%r15) */
		EMIT6(0xe3c0f078, 0x0024);
	else if (jit->seen & SEEN_LITERAL)
		/* stg %r13,128(%r15) */
		EMIT6(0xe3d0f080, 0x0024);

	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %r13,0 */
		EMIT2(0x0dd0);
		jit->base_ip = jit->prg;
	}
	jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
	jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
	jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
	jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
	jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
	jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
	jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);

	/* Filter needs to access skb data */
	if (jit->seen & SEEN_DATAREF) {
		/* l %r11,<len>(%r2) */
		EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
		/* s %r11,<data_len>(%r2) */
		EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
		/* lg %r10,<data>(%r2) */
		EMIT6_DISP(0xe3a02000, 0x0004,
			   offsetof(struct sk_buff, data));
	}
}
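
/*
 * With SEEN_DATAREF the prologue saves %r8-%r15 at 88(%r15) and carves
 * out a new stack frame of 80 bytes (112 when mem[] scratch space is in
 * use); the matching epilogue below reloads the registers from 88(%r15)
 * plus that frame size, i.e. from 168 or 200.
 */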

static void bpf_jit_epilogue(struct bpf_jit *jit)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %r2,0 */
		EMIT4(0xa7290000);
	}
	jit->exit_ip = jit->prg;
	/* Restore registers */
	if (jit->seen & SEEN_DATAREF)
		/* lmg %r8,%r15,<offset>(%r15) */
		EMIT6_DISP(0xeb8ff000, 0x0004,
			   (jit->seen & SEEN_MEM) ? 200 : 168);
	else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
		/* lmg %r12,%r13,120(%r15) */
		EMIT6(0xebcdf078, 0x0004);
	else if (jit->seen & SEEN_XREG)
		/* lg %r12,120(%r15) */
		EMIT6(0xe3c0f078, 0x0004);
	else if (jit->seen & SEEN_LITERAL)
		/* lg %r13,128(%r15) */
		EMIT6(0xe3d0f080, 0x0004);
	/* br %r14 */
	EMIT2(0x07fe);
}
225
Heiko Carstensc9a7afa2013-07-17 14:26:50 +0200226/* Helper to find the offset of pkt_type in sk_buff
227 * Make sure its still a 3bit field starting at the MSBs within a byte.
228 */
229#define PKT_TYPE_MAX 0xe0
230static int pkt_type_offset;
231
232static int __init bpf_pkt_type_offset_init(void)
233{
234 struct sk_buff skb_probe = {
235 .pkt_type = ~0,
236 };
237 char *ct = (char *)&skb_probe;
238 int off;
239
240 pkt_type_offset = -1;
241 for (off = 0; off < sizeof(struct sk_buff); off++) {
242 if (!ct[off])
243 continue;
244 if (ct[off] == PKT_TYPE_MAX)
245 pkt_type_offset = off;
246 else {
247 /* Found non matching bit pattern, fix needed. */
248 WARN_ON_ONCE(1);
249 pkt_type_offset = -1;
250 return -1;
251 }
252 }
253 return 0;
254}
255device_initcall(bpf_pkt_type_offset_init);
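
/*
 * pkt_type is a bitfield, so offsetof() cannot be applied to it; the
 * probe above derives its byte offset at boot time by setting all bits
 * of pkt_type and scanning the struct for the resulting 0xe0 pattern.
 */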

/*
 * Make sure we don't leak kernel information to user space.
 */
static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
{
	/* Clear temporary memory if (seen & SEEN_MEM) */
	if (jit->seen & SEEN_MEM)
		/* xc 0(64,%r15),0(%r15) */
		EMIT6(0xd73ff000, 0xf000);
	/* Clear X if (seen & SEEN_XREG) */
	if (jit->seen & SEEN_XREG)
		/* lhi %r12,0 */
		EMIT4(0xa7c80000);
	/* Clear A if the first instruction does not set it. */
	switch (filter[0].code) {
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_IND:
	case BPF_S_LD_H_IND:
	case BPF_S_LD_B_IND:
	case BPF_S_LD_IMM:
	case BPF_S_LD_MEM:
	case BPF_S_MISC_TXA:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_PKTTYPE:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_QUEUE:
	case BPF_S_ANC_HATYPE:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_RET_K:
		/* first instruction sets A register */
		break;
	default: /* A = 0 */
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
	}
}
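
/*
 * Translate one BPF instruction.  Returns 0 on success and -1 if the
 * instruction is too complex to JIT.  addrs[i] records the code offset
 * right after instruction i, so a branch by K from instruction i can be
 * resolved as addrs[i + K] on a later pass.
 */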
static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
			unsigned int *addrs, int i, int last)
{
	unsigned int K;
	int offset;
	unsigned int mask;

	K = filter->k;
	switch (filter->code) {
	case BPF_S_ALU_ADD_X: /* A += X */
		jit->seen |= SEEN_XREG;
		/* ar %r5,%r12 */
		EMIT2(0x1a5c);
		break;
	case BPF_S_ALU_ADD_K: /* A += K */
		if (!K)
			break;
		if (K <= 16383)
			/* ahi %r5,<K> */
			EMIT4_IMM(0xa75a0000, K);
		else if (test_facility(21))
			/* alfi %r5,<K> */
			EMIT6_IMM(0xc25b0000, K);
		else
			/* a %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
		break;
	case BPF_S_ALU_SUB_X: /* A -= X */
		jit->seen |= SEEN_XREG;
		/* sr %r5,%r12 */
		EMIT2(0x1b5c);
		break;
	case BPF_S_ALU_SUB_K: /* A -= K */
		if (!K)
			break;
		if (K <= 16384)
			/* ahi %r5,-K */
			EMIT4_IMM(0xa75a0000, -K);
		else if (test_facility(21))
			/* alfi %r5,-K */
			EMIT6_IMM(0xc25b0000, -K);
		else
			/* s %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
		break;
	case BPF_S_ALU_MUL_X: /* A *= X */
		jit->seen |= SEEN_XREG;
		/* msr %r5,%r12 */
		EMIT4(0xb252005c);
		break;
	case BPF_S_ALU_MUL_K: /* A *= K */
		if (K <= 16383)
			/* mhi %r5,K */
			EMIT4_IMM(0xa75c0000, K);
		else if (test_facility(34))
			/* msfi %r5,<K> */
			EMIT6_IMM(0xc2510000, K);
		else
			/* ms %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x7150d000, EMIT_CONST(K));
		break;
	case BPF_S_ALU_DIV_X: /* A /= X */
		jit->seen |= SEEN_XREG | SEEN_RET0;
		/* ltr %r12,%r12 */
		EMIT2(0x12cc);
		/* jz <ret0> */
		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
		/* lhi %r4,0 */
		EMIT4(0xa7480000);
		/* dlr %r4,%r12 */
		EMIT4(0xb997004c);
		break;
	case BPF_S_ALU_DIV_K: /* A /= K */
		if (K == 1)
			break;
		/* lhi %r4,0 */
		EMIT4(0xa7480000);
		/* dl %r4,<d(K)>(%r13) */
		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
		break;
	case BPF_S_ALU_MOD_X: /* A %= X */
		jit->seen |= SEEN_XREG | SEEN_RET0;
		/* ltr %r12,%r12 */
		EMIT2(0x12cc);
		/* jz <ret0> */
		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
		/* lhi %r4,0 */
		EMIT4(0xa7480000);
		/* dlr %r4,%r12 */
		EMIT4(0xb997004c);
		/* lr %r5,%r4 */
		EMIT2(0x1854);
		break;
	case BPF_S_ALU_MOD_K: /* A %= K */
		if (K == 1) {
			/* lhi %r5,0 */
			EMIT4(0xa7580000);
			break;
		}
		/* lhi %r4,0 */
		EMIT4(0xa7480000);
		/* dl %r4,<d(K)>(%r13) */
		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
		/* lr %r5,%r4 */
		EMIT2(0x1854);
		break;
	case BPF_S_ALU_AND_X: /* A &= X */
		jit->seen |= SEEN_XREG;
		/* nr %r5,%r12 */
		EMIT2(0x145c);
		break;
	case BPF_S_ALU_AND_K: /* A &= K */
		if (test_facility(21))
			/* nilf %r5,<K> */
			EMIT6_IMM(0xc05b0000, K);
		else
			/* n %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x5450d000, EMIT_CONST(K));
		break;
	case BPF_S_ALU_OR_X: /* A |= X */
		jit->seen |= SEEN_XREG;
		/* or %r5,%r12 */
		EMIT2(0x165c);
		break;
	case BPF_S_ALU_OR_K: /* A |= K */
		if (test_facility(21))
			/* oilf %r5,<K> */
			EMIT6_IMM(0xc05d0000, K);
		else
			/* o %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x5650d000, EMIT_CONST(K));
		break;
	case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
	case BPF_S_ALU_XOR_X:
		jit->seen |= SEEN_XREG;
		/* xr %r5,%r12 */
		EMIT2(0x175c);
		break;
	case BPF_S_ALU_XOR_K: /* A ^= K */
		if (!K)
			break;
		/* x %r5,<d(K)>(%r13) */
		EMIT4_DISP(0x5750d000, EMIT_CONST(K));
		break;
	case BPF_S_ALU_LSH_X: /* A <<= X; */
		jit->seen |= SEEN_XREG;
		/* sll %r5,0(%r12) */
		EMIT4(0x8950c000);
		break;
	case BPF_S_ALU_LSH_K: /* A <<= K */
		if (K == 0)
			break;
		/* sll %r5,K */
		EMIT4_DISP(0x89500000, K);
		break;
	case BPF_S_ALU_RSH_X: /* A >>= X; */
		jit->seen |= SEEN_XREG;
		/* srl %r5,0(%r12) */
		EMIT4(0x8850c000);
		break;
	case BPF_S_ALU_RSH_K: /* A >>= K; */
		if (K == 0)
			break;
		/* srl %r5,K */
		EMIT4_DISP(0x88500000, K);
		break;
	case BPF_S_ALU_NEG: /* A = -A */
		/* lnr %r5,%r5 */
		EMIT2(0x1155);
		break;
	case BPF_S_JMP_JA: /* ip += K */
		offset = addrs[i + K] + jit->start - jit->prg;
		EMIT4_PCREL(0xa7f40000, offset);
		break;
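
	/*
	 * The conditional jumps below assemble a "brc <mask>,<target>"
	 * instruction: <mask> is the s390 condition-code mask (jh, jhe,
	 * je, jnz) for the jt branch, and <mask> ^ 0xf00000 selects the
	 * complementary condition for the jf branch.
	 */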
	case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
		mask = 0x200000; /* jh */
		goto kbranch;
	case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
		mask = 0xa00000; /* jhe */
		goto kbranch;
	case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
		mask = 0x800000; /* je */
kbranch:	/* Emit compare if the branch targets are different */
		if (filter->jt != filter->jf) {
			if (K <= 16383)
				/* chi %r5,<K> */
				EMIT4_IMM(0xa75e0000, K);
			else if (test_facility(21))
				/* clfi %r5,<K> */
				EMIT6_IMM(0xc25f0000, K);
			else
				/* c %r5,<d(K)>(%r13) */
				EMIT4_DISP(0x5950d000, EMIT_CONST(K));
		}
branch:		if (filter->jt == filter->jf) {
			if (filter->jt == 0)
				break;
			/* j <jt> */
			offset = addrs[i + filter->jt] + jit->start - jit->prg;
			EMIT4_PCREL(0xa7f40000, offset);
			break;
		}
		if (filter->jt != 0) {
			/* brc <mask>,<jt> */
			offset = addrs[i + filter->jt] + jit->start - jit->prg;
			EMIT4_PCREL(0xa7040000 | mask, offset);
		}
		if (filter->jf != 0) {
			/* brc <mask^15>,<jf> */
			offset = addrs[i + filter->jf] + jit->start - jit->prg;
			EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
		}
		break;
	case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
		mask = 0x700000; /* jnz */
		/* Emit test if the branch targets are different */
		if (filter->jt != filter->jf) {
			if (K > 65535) {
				/* lr %r4,%r5 */
				EMIT2(0x1845);
				/* n %r4,<d(K)>(%r13) */
				EMIT4_DISP(0x5440d000, EMIT_CONST(K));
			} else
				/* tmll %r5,K */
				EMIT4_IMM(0xa7510000, K);
		}
		goto branch;
	case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
		mask = 0x200000; /* jh */
		goto xbranch;
	case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
		mask = 0xa00000; /* jhe */
		goto xbranch;
	case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
		mask = 0x800000; /* je */
xbranch:	/* Emit compare if the branch targets are different */
		if (filter->jt != filter->jf) {
			jit->seen |= SEEN_XREG;
			/* cr %r5,%r12 */
			EMIT2(0x195c);
		}
		goto branch;
	case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
		mask = 0x700000; /* jnz */
		/* Emit test if the branch targets are different */
		if (filter->jt != filter->jf) {
			jit->seen |= SEEN_XREG;
			/* lr %r4,%r5 */
			EMIT2(0x1845);
			/* nr %r4,%r12 */
			EMIT2(0x144c);
		}
		goto branch;
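
	/*
	 * The packet loads below call the sk_load_* assembler helpers:
	 * the skb pointer stays in %r2, the offset is passed in %r3, the
	 * helper address is fetched from the literal pool into %r1, and
	 * a non-zero condition code after "basr %r8,%r1" signals that
	 * the load failed, in which case the filter returns 0.
	 */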
	case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
		offset = jit->off_load_word;
		goto load_abs;
	case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
		offset = jit->off_load_half;
		goto load_abs;
	case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
		offset = jit->off_load_byte;
load_abs:	if ((int) K < 0)
			goto out;
call_fn:	/* lg %r1,<d(function)>(%r13) */
		EMIT6_DISP(0xe310d000, 0x0004, offset);
		/* l %r3,<d(K)>(%r13) */
		EMIT4_DISP(0x5830d000, EMIT_CONST(K));
		/* basr %r8,%r1 */
		EMIT2(0x0d81);
		/* jnz <ret0> */
		EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
		break;
	case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
		offset = jit->off_load_iword;
		goto call_fn;
	case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
		offset = jit->off_load_ihalf;
		goto call_fn;
	case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
		offset = jit->off_load_ibyte;
		goto call_fn;
	case BPF_S_LDX_B_MSH:
		/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
		jit->seen |= SEEN_RET0;
		if ((int) K < 0) {
			/* j <ret0> */
			EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
			break;
		}
		jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
		offset = jit->off_load_bmsh;
		goto call_fn;
	case BPF_S_LD_W_LEN: /* A = skb->len; */
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
		/* l %r5,<d(len)>(%r2) */
		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
		break;
	case BPF_S_LDX_W_LEN: /* X = skb->len; */
		jit->seen |= SEEN_XREG;
		/* l %r12,<d(len)>(%r2) */
		EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
		break;
	case BPF_S_LD_IMM: /* A = K */
		if (K <= 16383)
			/* lhi %r5,K */
			EMIT4_IMM(0xa7580000, K);
		else if (test_facility(21))
			/* llilf %r5,<K> */
			EMIT6_IMM(0xc05f0000, K);
		else
			/* l %r5,<d(K)>(%r13) */
			EMIT4_DISP(0x5850d000, EMIT_CONST(K));
		break;
	case BPF_S_LDX_IMM: /* X = K */
		jit->seen |= SEEN_XREG;
		if (K <= 16383)
			/* lhi %r12,<K> */
			EMIT4_IMM(0xa7c80000, K);
		else if (test_facility(21))
			/* llilf %r12,<K> */
			EMIT6_IMM(0xc0cf0000, K);
		else
			/* l %r12,<d(K)>(%r13) */
			EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
		break;
	case BPF_S_LD_MEM: /* A = mem[K] */
		jit->seen |= SEEN_MEM;
		/* l %r5,<K>(%r15) */
		EMIT4_DISP(0x5850f000,
			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
		break;
	case BPF_S_LDX_MEM: /* X = mem[K] */
		jit->seen |= SEEN_XREG | SEEN_MEM;
		/* l %r12,<K>(%r15) */
		EMIT4_DISP(0x58c0f000,
			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
		break;
	case BPF_S_MISC_TAX: /* X = A */
		jit->seen |= SEEN_XREG;
		/* lr %r12,%r5 */
		EMIT2(0x18c5);
		break;
	case BPF_S_MISC_TXA: /* A = X */
		jit->seen |= SEEN_XREG;
		/* lr %r5,%r12 */
		EMIT2(0x185c);
		break;
	case BPF_S_RET_K:
		if (K == 0) {
			jit->seen |= SEEN_RET0;
			if (last)
				break;
			/* j <ret0> */
			EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
		} else {
			if (K <= 16383)
				/* lghi %r2,K */
				EMIT4_IMM(0xa7290000, K);
			else
				/* llgf %r2,<K>(%r13) */
				EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
			/* j <exit> */
			if (last && !(jit->seen & SEEN_RET0))
				break;
			EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
		}
		break;
	case BPF_S_RET_A:
		/* llgfr %r2,%r5 */
		EMIT4(0xb9160025);
		/* j <exit> */
		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
		break;
	case BPF_S_ST: /* mem[K] = A */
		jit->seen |= SEEN_MEM;
		/* st %r5,<K>(%r15) */
		EMIT4_DISP(0x5050f000,
			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
		break;
	case BPF_S_STX: /* mem[K] = X */
		jit->seen |= SEEN_XREG | SEEN_MEM;
		/* st %r12,<K>(%r15) */
		EMIT4_DISP(0x50c0f000,
			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
		break;
	case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
		/* icm %r5,3,<d(protocol)>(%r2) */
		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
		break;
	case BPF_S_ANC_IFINDEX:	/* if (!skb->dev) return 0;
				 * A = skb->dev->ifindex */
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		jit->seen |= SEEN_RET0;
		/* lg %r1,<d(dev)>(%r2) */
		EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
		/* ltgr %r1,%r1 */
		EMIT4(0xb9020011);
		/* jz <ret0> */
		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
		/* l %r5,<d(ifindex)>(%r1) */
		EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
		break;
	case BPF_S_ANC_MARK: /* A = skb->mark */
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
		/* l %r5,<d(mark)>(%r2) */
		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
		break;
	case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
		/* icm %r5,3,<d(queue_mapping)>(%r2) */
		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
		break;
	case BPF_S_ANC_HATYPE:	/* if (!skb->dev) return 0;
				 * A = skb->dev->type */
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		jit->seen |= SEEN_RET0;
		/* lg %r1,<d(dev)>(%r2) */
		EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
		/* ltgr %r1,%r1 */
		EMIT4(0xb9020011);
		/* jz <ret0> */
		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
		/* icm %r5,3,<d(type)>(%r1) */
		EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
		break;
	case BPF_S_ANC_RXHASH: /* A = skb->hash */
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
		/* l %r5,<d(hash)>(%r2) */
		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
		break;
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
		/* icm %r5,3,<d(vlan_tci)>(%r2) */
		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
		if (filter->code == BPF_S_ANC_VLAN_TAG) {
			/* nill %r5,0xefff */
			EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
		} else {
			/* nill %r5,0x1000 */
			EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
			/* srl %r5,12 */
			EMIT4_DISP(0x88500000, 12);
		}
		break;
	case BPF_S_ANC_PKTTYPE:
		if (pkt_type_offset < 0)
			goto out;
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
		/* ic %r5,<d(pkt_type_offset)>(%r2) */
		EMIT4_DISP(0x43502000, pkt_type_offset);
		/* srl %r5,5 */
		EMIT4_DISP(0x88500000, 5);
		break;
	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP
		/* l %r5,<d(cpu_nr)> */
		EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
#else
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
#endif
		break;
	default: /* too complex, give up */
		goto out;
	}
	addrs[i] = jit->prg - jit->start;
	return 0;
out:
	return -1;
}

/*
 * Note: for security reasons, the BPF code is preceded by a randomly
 * sized block of illegal instructions.
 */
struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
						  u8 **image_ptr)
{
	struct bpf_binary_header *header;
	unsigned int sz, hole;

	/* Most BPF filters are really small, but if some of them fill a page,
	 * allow at least 128 extra bytes for illegal instructions.
	 */
	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;
	memset(header, 0, sz);
	header->pages = sz / PAGE_SIZE;
	hole = sz - (bpfsize + sizeof(*header));
	/* Insert random number of illegal instructions before BPF code
	 * and make sure the first instruction starts at an even address.
	 */
	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
	return header;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	struct bpf_binary_header *header = NULL;
	unsigned long size, prg_len, lit_len;
	struct bpf_jit jit, cjit;
	unsigned int *addrs;
	int pass, i;

	if (!bpf_jit_enable)
		return;
	addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;
	memset(&jit, 0, sizeof(jit));
	memset(&cjit, 0, sizeof(cjit));
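
	/*
	 * Iterate until the code size converges: early passes run with a
	 * NULL image and only measure; once two consecutive passes agree
	 * on the program and literal pool sizes the image is allocated,
	 * and the loop stops when two emitting passes produce identical
	 * state, i.e. all branch offsets are stable.
	 */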
	for (pass = 0; pass < 10; pass++) {
		jit.prg = jit.start;
		jit.lit = jit.mid;

		bpf_jit_prologue(&jit);
		bpf_jit_noleaks(&jit, fp->insns);
		for (i = 0; i < fp->len; i++) {
			if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
					 i == fp->len - 1))
				goto out;
		}
		bpf_jit_epilogue(&jit);
		if (jit.start) {
			WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
			if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
				break;
		} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
			prg_len = jit.prg - jit.start;
			lit_len = jit.lit - jit.mid;
			size = prg_len + lit_len;
			if (size >= BPF_SIZE_MAX)
				goto out;
			header = bpf_alloc_binary(size, &jit.start);
			if (!header)
				goto out;
			jit.prg = jit.mid = jit.start + prg_len;
			jit.lit = jit.end = jit.start + prg_len + lit_len;
			jit.base_ip += (unsigned long) jit.start;
			jit.exit_ip += (unsigned long) jit.start;
			jit.ret0_ip += (unsigned long) jit.start;
		}
		cjit = jit;
	}
	if (bpf_jit_enable > 1) {
		bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
		if (jit.start)
			print_fn_code(jit.start, jit.mid - jit.start);
	}
	if (jit.start) {
		set_memory_ro((unsigned long)header, header->pages);
		fp->bpf_func = (void *) jit.start;
		fp->jited = 1;
	}
out:
	kfree(addrs);
}
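
/*
 * For reference, a filter reaches this JIT when user space attaches a
 * classic BPF program to a socket.  A minimal sketch (illustrative
 * user-space code, not part of this file) that accepts only IPv4
 * frames on a packet socket:
 *
 *	#include <linux/filter.h>
 *	#include <linux/if_ether.h>
 *	#include <sys/socket.h>
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffU),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *	struct sock_fprog prog = {
 *		.len = 4,
 *		.filter = insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * With bpf_jit_enable set, the kernel compiles these instructions via
 * bpf_jit_compile() above instead of interpreting them.
 */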

void bpf_jit_free(struct sk_filter *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	module_free(NULL, header);

free_filter:
	kfree(fp);
}