/*
 * BPF Jit compiler for s390.
 *
 * Copyright IBM Corp. 2012
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <linux/random.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/facility.h>

/*
 * Conventions:
 * %r2 = skb pointer
 * %r3 = offset parameter
 * %r4 = scratch register / length parameter
 * %r5 = BPF A accumulator
 * %r8 = return address
 * %r9 = save register for skb pointer
 * %r10 = skb->data
 * %r11 = skb->len - skb->data_len (headlen)
 * %r12 = BPF X accumulator
 * %r13 = literal pool pointer
 * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
 */
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/s390/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];

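/*
 * The JIT state below supports two-phase code generation: while
 * jit->start is NULL, the EMIT* macros only advance jit->prg and
 * jit->lit, so a pass over the program merely measures how much code
 * and literal pool space is needed.  Once the sizes have converged,
 * bpf_jit_compile() allocates the image and reruns the passes with
 * jit->mid and jit->end as write limits, this time actually storing
 * instructions (jit->start .. jit->mid) and literals
 * (jit->mid .. jit->end).
 */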
struct bpf_jit {
        unsigned int seen;
        u8 *start;
        u8 *prg;
        u8 *mid;
        u8 *lit;
        u8 *end;
        u8 *base_ip;
        u8 *ret0_ip;
        u8 *exit_ip;
        unsigned int off_load_word;
        unsigned int off_load_half;
        unsigned int off_load_byte;
        unsigned int off_load_bmsh;
        unsigned int off_load_iword;
        unsigned int off_load_ihalf;
        unsigned int off_load_ibyte;
};

#define BPF_SIZE_MAX    4096    /* Max size for program */

#define SEEN_DATAREF    1       /* might call external helpers */
#define SEEN_XREG       2       /* %r12 (BPF X accumulator) is used */
#define SEEN_MEM        4       /* use mem[] for temporary storage */
#define SEEN_RET0       8       /* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL    16      /* code uses literals */
#define SEEN_LOAD_WORD  32      /* code uses sk_load_word */
#define SEEN_LOAD_HALF  64      /* code uses sk_load_half */
#define SEEN_LOAD_BYTE  128     /* code uses sk_load_byte */
#define SEEN_LOAD_BMSH  256     /* code uses sk_load_byte_msh */
#define SEEN_LOAD_IWORD 512     /* code uses sk_load_word_ind */
#define SEEN_LOAD_IHALF 1024    /* code uses sk_load_half_ind */
#define SEEN_LOAD_IBYTE 2048    /* code uses sk_load_byte_ind */

#define EMIT2(op)                               \
({                                              \
        if (jit->prg + 2 <= jit->mid)           \
                *(u16 *) jit->prg = op;         \
        jit->prg += 2;                          \
})

#define EMIT4(op)                               \
({                                              \
        if (jit->prg + 4 <= jit->mid)           \
                *(u32 *) jit->prg = op;         \
        jit->prg += 4;                          \
})

#define EMIT4_DISP(op, disp)                    \
({                                              \
        unsigned int __disp = (disp) & 0xfff;   \
        EMIT4(op | __disp);                     \
})

#define EMIT4_IMM(op, imm)                      \
({                                              \
        unsigned int __imm = (imm) & 0xffff;    \
        EMIT4(op | __imm);                      \
})

#define EMIT4_PCREL(op, pcrel)                  \
({                                              \
        long __pcrel = ((pcrel) >> 1) & 0xffff; \
        EMIT4(op | __pcrel);                    \
})
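/*
 * Branch offsets on s390 are halfword-scaled, so EMIT4_PCREL stores
 * (pcrel >> 1) in the 16-bit immediate field; a branch over the next
 * 8 bytes, for example, is encoded as the value 4.
 */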
#define EMIT6(op1, op2)                                 \
({                                                      \
        if (jit->prg + 6 <= jit->mid) {                 \
                *(u32 *) jit->prg = op1;                \
                *(u16 *) (jit->prg + 4) = op2;          \
        }                                               \
        jit->prg += 6;                                  \
})

#define EMIT6_DISP(op1, op2, disp)                      \
({                                                      \
        unsigned int __disp = (disp) & 0xfff;           \
        EMIT6(op1 | __disp, op2);                       \
})

#define EMIT6_IMM(op, imm)                              \
({                                                      \
        unsigned int __imm = (imm);                     \
        EMIT6(op | (__imm >> 16), __imm & 0xffff);      \
})
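/*
 * Example: with the extended-immediate facility (test_facility(21)),
 * EMIT6_IMM(0xc25b0000, K) emits "alfi %r5,K": the high 16 bits of K
 * are merged into the first instruction word, the low 16 bits form
 * the trailing halfword.
 */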
125
Martin Schwidefskyc10302e2012-07-31 16:23:59 +0200126#define EMIT_CONST(val) \
127({ \
128 unsigned int ret; \
129 ret = (unsigned int) (jit->lit - jit->base_ip); \
130 jit->seen |= SEEN_LITERAL; \
131 if (jit->lit + 4 <= jit->end) \
132 *(u32 *) jit->lit = val; \
133 jit->lit += 4; \
134 ret; \
135})
136
137#define EMIT_FN_CONST(bit, fn) \
138({ \
139 unsigned int ret; \
140 ret = (unsigned int) (jit->lit - jit->base_ip); \
141 if (jit->seen & bit) { \
142 jit->seen |= SEEN_LITERAL; \
143 if (jit->lit + 8 <= jit->end) \
144 *(void **) jit->lit = fn; \
145 jit->lit += 8; \
146 } \
147 ret; \
148})
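/*
 * Both macros return the offset of the new literal relative to
 * base_ip.  The prologue sets %r13 to base_ip with "basr %r13,0", so
 * generated code can reach any literal with a 12-bit displacement,
 * e.g. "l %r5,<d(K)>(%r13)".  EMIT_FN_CONST stores the 8-byte address
 * of an sk_load_* helper so it can later be loaded and called via %r1.
 */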

static void bpf_jit_prologue(struct bpf_jit *jit)
{
        /* Save registers and create stack frame if necessary */
        if (jit->seen & SEEN_DATAREF) {
                /* stmg %r8,%r15,88(%r15) */
                EMIT6(0xeb8ff058, 0x0024);
                /* lgr %r14,%r15 */
                EMIT4(0xb90400ef);
                /* ahi %r15,<offset> */
                EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
                /* stg %r14,152(%r15) */
                EMIT6(0xe3e0f098, 0x0024);
        } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
                /* stmg %r12,%r13,120(%r15) */
                EMIT6(0xebcdf078, 0x0024);
        else if (jit->seen & SEEN_XREG)
                /* stg %r12,120(%r15) */
                EMIT6(0xe3c0f078, 0x0024);
        else if (jit->seen & SEEN_LITERAL)
                /* stg %r13,128(%r15) */
                EMIT6(0xe3d0f080, 0x0024);

        /* Setup literal pool */
        if (jit->seen & SEEN_LITERAL) {
                /* basr %r13,0 */
                EMIT2(0x0dd0);
                jit->base_ip = jit->prg;
        }
        jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
        jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
        jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
        jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
        jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
        jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
        jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);

        /* Filter needs to access skb data */
        if (jit->seen & SEEN_DATAREF) {
                /* l %r11,<len>(%r2) */
                EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
                /* s %r11,<data_len>(%r2) */
                EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
                /* lg %r10,<data>(%r2) */
                EMIT6_DISP(0xe3a02000, 0x0004,
                           offsetof(struct sk_buff, data));
        }
}

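/*
 * The epilogue provides two entry points that branches generated in
 * bpf_jit_insn() can target: ret0_ip, which loads a return value of 0
 * first, and exit_ip, which only restores the registers saved by the
 * prologue and returns to the caller.
 */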
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
        /* Return 0 */
        if (jit->seen & SEEN_RET0) {
                jit->ret0_ip = jit->prg;
                /* lghi %r2,0 */
                EMIT4(0xa7290000);
        }
        jit->exit_ip = jit->prg;
        /* Restore registers */
        if (jit->seen & SEEN_DATAREF)
                /* lmg %r8,%r15,<offset>(%r15) */
                EMIT6_DISP(0xeb8ff000, 0x0004,
                           (jit->seen & SEEN_MEM) ? 200 : 168);
        else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
                /* lmg %r12,%r13,120(%r15) */
                EMIT6(0xebcdf078, 0x0004);
        else if (jit->seen & SEEN_XREG)
                /* lg %r12,120(%r15) */
                EMIT6(0xe3c0f078, 0x0004);
        else if (jit->seen & SEEN_LITERAL)
                /* lg %r13,128(%r15) */
                EMIT6(0xe3d0f080, 0x0004);
        /* br %r14 */
        EMIT2(0x07fe);
}

/*
 * Make sure we don't leak kernel information to userspace.
 */
static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
{
        /* Clear temporary memory if (seen & SEEN_MEM) */
        if (jit->seen & SEEN_MEM)
                /* xc 0(64,%r15),0(%r15) */
                EMIT6(0xd73ff000, 0xf000);
        /* Clear X if (seen & SEEN_XREG) */
        if (jit->seen & SEEN_XREG)
                /* lhi %r12,0 */
                EMIT4(0xa7c80000);
        /* Clear A if the first instruction does not set it. */
        switch (filter[0].code) {
        case BPF_S_LD_W_ABS:
        case BPF_S_LD_H_ABS:
        case BPF_S_LD_B_ABS:
        case BPF_S_LD_W_LEN:
        case BPF_S_LD_W_IND:
        case BPF_S_LD_H_IND:
        case BPF_S_LD_B_IND:
        case BPF_S_LDX_B_MSH:
        case BPF_S_LD_IMM:
        case BPF_S_LD_MEM:
        case BPF_S_MISC_TXA:
        case BPF_S_ANC_PROTOCOL:
        case BPF_S_ANC_PKTTYPE:
        case BPF_S_ANC_IFINDEX:
        case BPF_S_ANC_MARK:
        case BPF_S_ANC_QUEUE:
        case BPF_S_ANC_HATYPE:
        case BPF_S_ANC_RXHASH:
        case BPF_S_ANC_CPU:
        case BPF_S_ANC_VLAN_TAG:
        case BPF_S_ANC_VLAN_TAG_PRESENT:
        case BPF_S_RET_K:
                /* first instruction sets A register */
                break;
        default: /* A = 0 */
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
        }
}

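/*
 * Translate a single BPF instruction.  addrs[] records the offset of
 * the code generated for each instruction so that jumps can be
 * resolved against jit->start; a return value of -1 marks an opcode
 * this JIT cannot handle, in which case the caller abandons the
 * translation and the filter stays on the interpreter.
 */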
static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                        unsigned int *addrs, int i, int last)
{
        unsigned int K;
        int offset;
        unsigned int mask;

        K = filter->k;
        switch (filter->code) {
        case BPF_S_ALU_ADD_X: /* A += X */
                jit->seen |= SEEN_XREG;
                /* ar %r5,%r12 */
                EMIT2(0x1a5c);
                break;
        case BPF_S_ALU_ADD_K: /* A += K */
                if (!K)
                        break;
                if (K <= 16383)
                        /* ahi %r5,<K> */
                        EMIT4_IMM(0xa75a0000, K);
                else if (test_facility(21))
                        /* alfi %r5,<K> */
                        EMIT6_IMM(0xc25b0000, K);
                else
                        /* a %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
                break;
        case BPF_S_ALU_SUB_X: /* A -= X */
                jit->seen |= SEEN_XREG;
                /* sr %r5,%r12 */
                EMIT2(0x1b5c);
                break;
        case BPF_S_ALU_SUB_K: /* A -= K */
                if (!K)
                        break;
                if (K <= 16384)
                        /* ahi %r5,-K */
                        EMIT4_IMM(0xa75a0000, -K);
                else if (test_facility(21))
                        /* alfi %r5,-K */
                        EMIT6_IMM(0xc25b0000, -K);
                else
                        /* s %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
                break;
        case BPF_S_ALU_MUL_X: /* A *= X */
                jit->seen |= SEEN_XREG;
                /* msr %r5,%r12 */
                EMIT4(0xb252005c);
                break;
        case BPF_S_ALU_MUL_K: /* A *= K */
                if (K <= 16383)
                        /* mhi %r5,K */
                        EMIT4_IMM(0xa75c0000, K);
                else if (test_facility(34))
                        /* msfi %r5,<K> */
                        EMIT6_IMM(0xc2510000, K);
                else
                        /* ms %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x7150d000, EMIT_CONST(K));
                break;
        case BPF_S_ALU_DIV_X: /* A /= X */
                jit->seen |= SEEN_XREG | SEEN_RET0;
                /* ltr %r12,%r12 */
                EMIT2(0x12cc);
                /* jz <ret0> */
                EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
                /* lhi %r4,0 */
                EMIT4(0xa7480000);
                /* dr %r4,%r12 */
                EMIT2(0x1d4c);
                break;
        case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
                /* m %r4,<d(K)>(%r13) */
                EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
                /* lr %r5,%r4 */
                EMIT2(0x1854);
                break;
        case BPF_S_ALU_MOD_X: /* A %= X */
                jit->seen |= SEEN_XREG | SEEN_RET0;
                /* ltr %r12,%r12 */
                EMIT2(0x12cc);
                /* jz <ret0> */
                EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
                /* lhi %r4,0 */
                EMIT4(0xa7480000);
                /* dr %r4,%r12 */
                EMIT2(0x1d4c);
                /* lr %r5,%r4 */
                EMIT2(0x1854);
                break;
        case BPF_S_ALU_MOD_K: /* A %= K */
                /* lhi %r4,0 */
                EMIT4(0xa7480000);
                /* d %r4,<d(K)>(%r13) */
                EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
                /* lr %r5,%r4 */
                EMIT2(0x1854);
                break;
        case BPF_S_ALU_AND_X: /* A &= X */
                jit->seen |= SEEN_XREG;
                /* nr %r5,%r12 */
                EMIT2(0x145c);
                break;
        case BPF_S_ALU_AND_K: /* A &= K */
                if (test_facility(21))
                        /* nilf %r5,<K> */
                        EMIT6_IMM(0xc05b0000, K);
                else
                        /* n %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x5450d000, EMIT_CONST(K));
                break;
        case BPF_S_ALU_OR_X: /* A |= X */
                jit->seen |= SEEN_XREG;
                /* or %r5,%r12 */
                EMIT2(0x165c);
                break;
        case BPF_S_ALU_OR_K: /* A |= K */
                if (test_facility(21))
                        /* oilf %r5,<K> */
                        EMIT6_IMM(0xc05d0000, K);
                else
                        /* o %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x5650d000, EMIT_CONST(K));
                break;
        case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
        case BPF_S_ALU_XOR_X:
                jit->seen |= SEEN_XREG;
                /* xr %r5,%r12 */
                EMIT2(0x175c);
                break;
        case BPF_S_ALU_XOR_K: /* A ^= K */
                if (!K)
                        break;
                /* x %r5,<d(K)>(%r13) */
                EMIT4_DISP(0x5750d000, EMIT_CONST(K));
                break;
        case BPF_S_ALU_LSH_X: /* A <<= X; */
                jit->seen |= SEEN_XREG;
                /* sll %r5,0(%r12) */
                EMIT4(0x8950c000);
                break;
        case BPF_S_ALU_LSH_K: /* A <<= K */
                if (K == 0)
                        break;
                /* sll %r5,K */
                EMIT4_DISP(0x89500000, K);
                break;
        case BPF_S_ALU_RSH_X: /* A >>= X; */
                jit->seen |= SEEN_XREG;
                /* srl %r5,0(%r12) */
                EMIT4(0x8850c000);
                break;
        case BPF_S_ALU_RSH_K: /* A >>= K; */
                if (K == 0)
                        break;
                /* srl %r5,K */
                EMIT4_DISP(0x88500000, K);
                break;
        case BPF_S_ALU_NEG: /* A = -A */
                /* lcr %r5,%r5 */
                EMIT2(0x1355);
                break;
        case BPF_S_JMP_JA: /* ip += K */
                offset = addrs[i + K] + jit->start - jit->prg;
                EMIT4_PCREL(0xa7f40000, offset);
                break;
        case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
                mask = 0x200000; /* jh */
                goto kbranch;
        case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
                mask = 0xa00000; /* jhe */
                goto kbranch;
        case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
                mask = 0x800000; /* je */
kbranch:        /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        if (K <= 16383)
                                /* chi %r5,<K> */
                                EMIT4_IMM(0xa75e0000, K);
                        else if (test_facility(21))
                                /* clfi %r5,<K> */
                                EMIT6_IMM(0xc25f0000, K);
                        else
                                /* c %r5,<d(K)>(%r13) */
                                EMIT4_DISP(0x5950d000, EMIT_CONST(K));
                }
branch:         if (filter->jt == filter->jf) {
                        if (filter->jt == 0)
                                break;
                        /* j <jt> */
                        offset = addrs[i + filter->jt] + jit->start - jit->prg;
                        EMIT4_PCREL(0xa7f40000, offset);
                        break;
                }
                if (filter->jt != 0) {
                        /* brc <mask>,<jt> */
                        offset = addrs[i + filter->jt] + jit->start - jit->prg;
                        EMIT4_PCREL(0xa7040000 | mask, offset);
                }
                if (filter->jf != 0) {
                        /* brc <mask^15>,<jf> */
                        offset = addrs[i + filter->jf] + jit->start - jit->prg;
                        EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
                }
                break;
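        /*
         * The branch cases above and below share one pattern: the
         * condition-code mask sits in bits 8-11 of the brc instruction
         * (the 0x?00000 nibble here), and XOR-ing it with 0xf00000
         * yields the complementary mask, so the same value serves both
         * the taken (jt) and fall-through (jf) targets.
         */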
        case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
                mask = 0x700000; /* jnz */
                /* Emit test if the branch targets are different */
                if (filter->jt != filter->jf) {
                        if (K > 65535) {
                                /* lr %r4,%r5 */
                                EMIT2(0x1845);
                                /* n %r4,<d(K)>(%r13) */
                                EMIT4_DISP(0x5440d000, EMIT_CONST(K));
                        } else
                                /* tmll %r5,K */
                                EMIT4_IMM(0xa7510000, K);
                }
                goto branch;
        case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
                mask = 0x200000; /* jh */
                goto xbranch;
        case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
                mask = 0xa00000; /* jhe */
                goto xbranch;
        case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
                mask = 0x800000; /* je */
xbranch:        /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
                        /* cr %r5,%r12 */
                        EMIT2(0x195c);
                }
                goto branch;
        case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
                mask = 0x700000; /* jnz */
                /* Emit test if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
                        /* lr %r4,%r5 */
                        EMIT2(0x1845);
                        /* nr %r4,%r12 */
                        EMIT2(0x144c);
                }
                goto branch;
        case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
                offset = jit->off_load_word;
                goto load_abs;
        case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
                offset = jit->off_load_half;
                goto load_abs;
        case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
                offset = jit->off_load_byte;
load_abs:       if ((int) K < 0)
                        goto out;
call_fn:        /* lg %r1,<d(function)>(%r13) */
                EMIT6_DISP(0xe310d000, 0x0004, offset);
                /* l %r3,<d(K)>(%r13) */
                EMIT4_DISP(0x5830d000, EMIT_CONST(K));
                /* basr %r8,%r1 */
                EMIT2(0x0d81);
                /* jnz <ret0> */
                EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
                break;
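        /*
         * The packet loads go through the sk_load_* helpers: %r2 holds
         * the skb, the K constant is loaded into %r3 as the offset, and
         * the helper is called via "basr %r8,%r1".  A helper that
         * cannot satisfy the load sets a nonzero condition code, which
         * the "jnz <ret0>" above turns into a return value of 0.  The
         * indirect loads below account for X (per their skb->data+K+X
         * semantics) and reuse the same call sequence.
         */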
        case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
                offset = jit->off_load_iword;
                goto call_fn;
        case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
                offset = jit->off_load_ihalf;
                goto call_fn;
        case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
                jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
                offset = jit->off_load_ibyte;
                goto call_fn;
        case BPF_S_LDX_B_MSH:
                /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
                jit->seen |= SEEN_RET0;
                if ((int) K < 0) {
                        /* j <ret0> */
                        EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
                        break;
                }
                jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
                offset = jit->off_load_bmsh;
                goto call_fn;
        case BPF_S_LD_W_LEN: /* A = skb->len; */
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                /* l %r5,<d(len)>(%r2) */
                EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
                break;
        case BPF_S_LDX_W_LEN: /* X = skb->len; */
                jit->seen |= SEEN_XREG;
                /* l %r12,<d(len)>(%r2) */
                EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
                break;
        case BPF_S_LD_IMM: /* A = K */
                if (K <= 16383)
                        /* lhi %r5,K */
                        EMIT4_IMM(0xa7580000, K);
                else if (test_facility(21))
                        /* llilf %r5,<K> */
                        EMIT6_IMM(0xc05f0000, K);
                else
                        /* l %r5,<d(K)>(%r13) */
                        EMIT4_DISP(0x5850d000, EMIT_CONST(K));
                break;
        case BPF_S_LDX_IMM: /* X = K */
                jit->seen |= SEEN_XREG;
                if (K <= 16383)
                        /* lhi %r12,<K> */
                        EMIT4_IMM(0xa7c80000, K);
                else if (test_facility(21))
                        /* llilf %r12,<K> */
                        EMIT6_IMM(0xc0cf0000, K);
                else
                        /* l %r12,<d(K)>(%r13) */
                        EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
                break;
        case BPF_S_LD_MEM: /* A = mem[K] */
                jit->seen |= SEEN_MEM;
                /* l %r5,<K>(%r15) */
                EMIT4_DISP(0x5850f000,
                           (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
                break;
        case BPF_S_LDX_MEM: /* X = mem[K] */
                jit->seen |= SEEN_XREG | SEEN_MEM;
                /* l %r12,<K>(%r15) */
                EMIT4_DISP(0x58c0f000,
                           (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
                break;
        case BPF_S_MISC_TAX: /* X = A */
                jit->seen |= SEEN_XREG;
                /* lr %r12,%r5 */
                EMIT2(0x18c5);
                break;
        case BPF_S_MISC_TXA: /* A = X */
                jit->seen |= SEEN_XREG;
                /* lr %r5,%r12 */
                EMIT2(0x185c);
                break;
        case BPF_S_RET_K:
                if (K == 0) {
                        jit->seen |= SEEN_RET0;
                        if (last)
                                break;
                        /* j <ret0> */
                        EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
                } else {
                        if (K <= 16383)
                                /* lghi %r2,K */
                                EMIT4_IMM(0xa7290000, K);
                        else
                                /* llgf %r2,<K>(%r13) */
                                EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
                        /* j <exit> */
                        if (last && !(jit->seen & SEEN_RET0))
                                break;
                        EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
                }
                break;
        case BPF_S_RET_A:
                /* llgfr %r2,%r5 */
                EMIT4(0xb9160025);
                /* j <exit> */
                EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
                break;
        case BPF_S_ST: /* mem[K] = A */
                jit->seen |= SEEN_MEM;
                /* st %r5,<K>(%r15) */
                EMIT4_DISP(0x5050f000,
                           (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
                break;
        case BPF_S_STX: /* mem[K] = X */
                jit->seen |= SEEN_XREG | SEEN_MEM;
                /* st %r12,<K>(%r15) */
                EMIT4_DISP(0x50c0f000,
                           (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
                break;
        case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
                /* icm %r5,3,<d(protocol)>(%r2) */
                EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
                break;
        case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
                                 * A = skb->dev->ifindex */
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                jit->seen |= SEEN_RET0;
                /* lg %r1,<d(dev)>(%r2) */
                EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
                /* ltgr %r1,%r1 */
                EMIT4(0xb9020011);
                /* jz <ret0> */
                EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
                /* l %r5,<d(ifindex)>(%r1) */
                EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
                break;
        case BPF_S_ANC_MARK: /* A = skb->mark */
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                /* l %r5,<d(mark)>(%r2) */
                EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
                break;
        case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
                /* icm %r5,3,<d(queue_mapping)>(%r2) */
                EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
                break;
        case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
                                * A = skb->dev->type */
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
                jit->seen |= SEEN_RET0;
                /* lg %r1,<d(dev)>(%r2) */
                EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
                /* ltgr %r1,%r1 */
                EMIT4(0xb9020011);
                /* jz <ret0> */
                EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
                /* icm %r5,3,<d(type)>(%r1) */
                EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
                break;
        case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
                /* l %r5,<d(rxhash)>(%r2) */
                EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
                break;
        case BPF_S_ANC_VLAN_TAG:
        case BPF_S_ANC_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
                /* icm %r5,3,<d(vlan_tci)>(%r2) */
                EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
                if (filter->code == BPF_S_ANC_VLAN_TAG) {
                        /* nill %r5,0xefff */
                        EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
                } else {
                        /* nill %r5,0x1000 */
                        EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
                        /* srl %r5,12 */
                        EMIT4_DISP(0x88500000, 12);
                }
                break;
        case BPF_S_ANC_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP
                /* l %r5,<d(cpu_nr)> */
                EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
#else
                /* lhi %r5,0 */
                EMIT4(0xa7580000);
#endif
                break;
        default: /* too complex, give up */
                goto out;
        }
        addrs[i] = jit->prg - jit->start;
        return 0;
out:
        return -1;
}

/*
 * Note: for security reasons, the BPF code is prefixed with a randomly
 * sized block of illegal instructions.
 */
struct bpf_binary_header {
        unsigned int pages;
        u8 image[];
};

static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                                                  u8 **image_ptr)
{
        struct bpf_binary_header *header;
        unsigned int sz, hole;

        /* Most BPF filters are really small, but if some of them fill a page,
         * allow at least 128 extra bytes for illegal instructions.
         */
        sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
        header = module_alloc(sz);
        if (!header)
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
        hole = sz - (bpfsize + sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
        *image_ptr = &header->image[(prandom_u32() % hole) & -2];
        return header;
}

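/*
 * bpf_jit_compile() iterates because the code size depends on its own
 * output: the jit.seen flags collected in one pass change the prologue,
 * epilogue and branch distances of the next.  Passes run (up to ten
 * times) until two consecutive dry runs report identical sizes; only
 * then is the image allocated and filled in by further passes.
 */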
void bpf_jit_compile(struct sk_filter *fp)
{
        struct bpf_binary_header *header = NULL;
        unsigned long size, prg_len, lit_len;
        struct bpf_jit jit, cjit;
        unsigned int *addrs;
        int pass, i;

        if (!bpf_jit_enable)
                return;
        addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;
        memset(&jit, 0, sizeof(jit));
        memset(&cjit, 0, sizeof(cjit));

        for (pass = 0; pass < 10; pass++) {
                jit.prg = jit.start;
                jit.lit = jit.mid;

                bpf_jit_prologue(&jit);
                bpf_jit_noleaks(&jit, fp->insns);
                for (i = 0; i < fp->len; i++) {
                        if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
                                         i == fp->len - 1))
                                goto out;
                }
                bpf_jit_epilogue(&jit);
                if (jit.start) {
                        WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
                        if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
                                break;
                } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
                        prg_len = jit.prg - jit.start;
                        lit_len = jit.lit - jit.mid;
                        size = prg_len + lit_len;
                        if (size >= BPF_SIZE_MAX)
                                goto out;
                        header = bpf_alloc_binary(size, &jit.start);
                        if (!header)
                                goto out;
                        jit.prg = jit.mid = jit.start + prg_len;
                        jit.lit = jit.end = jit.start + prg_len + lit_len;
                        jit.base_ip += (unsigned long) jit.start;
                        jit.exit_ip += (unsigned long) jit.start;
                        jit.ret0_ip += (unsigned long) jit.start;
                }
                cjit = jit;
        }
        if (bpf_jit_enable > 1) {
                bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
                if (jit.start)
                        print_fn_code(jit.start, jit.mid - jit.start);
        }
        if (jit.start) {
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *) jit.start;
        }
out:
        kfree(addrs);
}

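/*
 * The image was made read-only in bpf_jit_compile(), so it has to be
 * made writable again before module_free() releases it.  If bpf_func
 * still points at sk_run_filter(), no JIT image was ever attached and
 * there is nothing to free.
 */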
void bpf_jit_free(struct sk_filter *fp)
{
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (fp->bpf_func == sk_run_filter)
                return;
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
}