/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
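/*
 * Note: the $r_* names above are assembler aliases provided by
 * bpf_jit.h; they are expected to map onto callee-saved MIPS
 * registers, which is what lets them survive the calls into the C
 * helpers below.
 */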

#define skb	a0
#define offset	a1
#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
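/*
 * This mirrors SKF_LL_OFF from linux/filter.h: negative offsets in
 * [SKF_LL_OFF, 0) are "magic" loads resolved (or rejected) by the C
 * helper used in the negative paths below; anything below SKF_LL_OFF
 * is out of range and must fault.
 */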

	/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

#define is_offset_negative(TYPE) \
	/* If offset is negative we have more work to do */ \
	slti	t0, offset, 0; \
	bgtz	t0, bpf_slow_path_##TYPE##_neg; \
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE) \
	/* Reading from header? */ \
	addiu	$r_s0, $r_skb_hl, -SIZE; \
	slt	t0, $r_s0, offset; \
	bgtz	t0, bpf_slow_path_##TYPE; \

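/*
 * Example: with a 40-byte linear header ($r_skb_hl = 40), a 4-byte
 * load is in bounds only for offset <= 36. The macro computes
 * $r_s0 = 40 - 4 = 36 and branches to the slow path when 36 < offset.
 */
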
LEAF(sk_load_word)
	is_offset_negative(word)
	.globl sk_load_word_positive
sk_load_word_positive:
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lw	$r_A, 0(t1)
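	/*
	 * Packet data is in network (big-endian) byte order; on a
	 * little-endian CPU the loaded word must be byte-swapped into
	 * host order before it lands in BPF register A.
	 */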
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	t0, $r_A		/* swap bytes within each halfword */
	rotr	$r_A, t0, 16		/* then swap halfwords: full 32-bit swap */
# else
	sll	t0, $r_A, 24		/* A[7:0]   -> bits 31:24 */
	srl	t1, $r_A, 24		/* A[31:24] -> bits 7:0   */
	srl	t2, $r_A, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00		/* A[23:16] -> bits 15:8  */
	andi	t1, $r_A, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8		/* A[15:8]  -> bits 23:16 */
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	move	$r_ret, zero		/* delay slot: signal success */
	END(sk_load_word)

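/*
 * Worked example of the swap above: packet bytes 0a 0b 0c 0d form the
 * big-endian word 0x0a0b0c0d. A little-endian lw yields
 * $r_A = 0x0d0c0b0a; wsbh gives 0x0c0d0a0b and rotr by 16 gives
 * 0x0a0b0c0d, matching what a big-endian CPU loads directly.
 */
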
LEAF(sk_load_half)
	is_offset_negative(half)
	.globl sk_load_half_positive
sk_load_half_positive:
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lh	$r_A, 0(t1)
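	/*
	 * As with sk_load_word: swap the network-order halfword to
	 * host order on little-endian CPUs. Both variants preserve
	 * lh's sign extension.
	 */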
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	t0, $r_A		/* swap the two bytes of the halfword */
	seh	$r_A, t0		/* re-establish lh's sign extension */
# else
	sll	t0, $r_A, 24		/* A[7:0]  -> bits 31:24 */
	andi	t1, $r_A, 0xff00
	sra	t0, t0, 16		/* arithmetic shift preserves the sign */
	srl	t1, t1, 8		/* A[15:8] -> bits 7:0 */
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	move	$r_ret, zero		/* delay slot: signal success */
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
	.globl sk_load_byte_positive
sk_load_byte_positive:
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lb	$r_A, 0(t1)
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 spaces for argument registers in case
 * the callee needs to use them. Even though we don't care about
 * the argument registers ourselves, we need to allocate that space
 * to remain ABI compliant since the callee may want to use that space.
 * We also allocate 2 more spaces for $r_ra and our return register (*to).
 *
 * n64 is different: a callee that needs to preserve its register
 * arguments saves them within its own frame, so on 64-bit kernels the
 * 4-arg save area below is allocated for no good reason. That is
 * harmless, merely a few wasted stack bytes.
 *
 * (void *to) is returned in r_s0
 */
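/*
 * The (6 * SZREG) frame below follows that layout: the highest slot
 * (5 * SZREG) holds the saved $r_ra, slot (4 * SZREG) is the
 * destination word that is zeroed before the call and reloaded into
 * $r_s0 afterwards, and the low four slots form the o32 argument
 * save area for skb_copy_bits.
 */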
#define bpf_slow_path_common(SIZE) \
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE; \
	sltu		$r_s0, offset, $r_s1; \
	beqz		$r_s0, fault; \
	/* Load 4th argument in DS */ \
	LONG_ADDIU	a3, zero, SIZE; \
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG); \
	PTR_LA		t0, skb_copy_bits; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp); \
	/* Assign low slot to a2 */ \
	move		a2, $r_sp; \
	jalr		t0; \
	/* Reset our destination slot (in the jalr delay slot, which is fine) */ \
	INT_S		zero, (4 * SZREG)($r_sp); \
	/* \
	 * skb_copy_bits returns 0 on success and -EFAULT \
	 * on error. Our data live in a2. Do not bother with \
	 * our data if an error has been returned. \
	 */ \
	/* Restore our frame */ \
	PTR_L		$r_ra, (5 * SZREG)($r_sp); \
	INT_L		$r_s0, (4 * SZREG)($r_sp); \
	bltz		v0, fault; \
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG; \
	move		$r_ret, zero; \

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
	/* As in sk_load_word: swap the copied word to host byte order */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	t0, $r_s0
	jr	$r_ra
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_s0, 24
	srl	t1, $r_s0, 24
	srl	t2, $r_s0, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_s0, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	jr	$r_ra
	or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	move	$r_A, $r_s0
#endif

	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	jr	$r_ra
	wsbh	$r_A, $r_s0
# else
	sll	t0, $r_s0, 8
	andi	t1, $r_s0, 0xff00
	andi	t0, t0, 0xff00
	srl	t1, t1, 8
	jr	$r_ra
	or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	move	$r_A, $r_s0
#endif

	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	move	$r_A, $r_s0

	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
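/*
 * slt sets t1 when offset < SKF_LL_OFF, i.e. the offset lies below
 * the lowest valid ancillary offset, so such loads fault immediately;
 * everything else falls through to the negative-offset helpers below.
 */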
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *                                            int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
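/*
 * Unlike skb_copy_bits, this helper returns a pointer to the
 * requested data (or NULL on failure) instead of copying it out, so
 * the loads below read directly through $r_s0.
 */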
#define bpf_negative_common(SIZE) \
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG); \
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp); \
	jalr		t0; \
	li		a2, SIZE; \
	PTR_L		$r_ra, (5 * SZREG)($r_sp); \
	/* Check return pointer */ \
	beqz		v0, fault; \
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG; \
	/* Preserve our pointer */ \
	move		$r_s0, v0; \
	/* Set return value */ \
	move		$r_ret, zero; \

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

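/*
 * Common fault exit: a non-zero $r_ret tells the JIT-generated code
 * (see bpf_jit.c) that the load failed, and the generated epilogue
 * terminates the filter with a return value of 0, dropping the packet.
 */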
fault:
	jr	$r_ra
	addiu	$r_ret, zero, 1