/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
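
/*
 * Rough dispatch logic implemented below (an illustrative sketch, not
 * literal kernel code):
 *
 *	if (offset < 0)
 *		take a negative entry point (bpf_slow_path_*_neg);
 *	else if (offset + SIZE <= r_skb_hl)
 *		load directly from r_skb_data + offset (fast path);
 *	else
 *		call skb_copy_bits() (slow path, bpf_slow_path_*);
 *
 * On success the loaded value ends up in r_A and r_ret is zero; any
 * failure sets r_ret to 1 (see 'fault' at the bottom of this file).
 */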

#define skb	a0
#define offset	a1
#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */

	/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

#define is_offset_negative(TYPE)				\
	/* If offset is negative we have more work to do */	\
	slti	t0, offset, 0;					\
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE)				\
	/* Reading from header? */				\
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
	slt	t0, $r_s0, offset;				\
	bgtz	t0, bpf_slow_path_##TYPE;			\

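/*
 * Expansion sketch: at the top of sk_load_word, the two macros above
 * expand roughly to
 *
 *	slti	t0, a1, 0
 *	bgtz	t0, bpf_slow_path_word_neg
 *	 addiu	$r_s0, $r_skb_hl, -4	# runs in the branch delay slot
 *	slt	t0, $r_s0, a1
 *	bgtz	t0, bpf_slow_path_word
 *	 # the PTR_ADDU that follows sits in this delay slot
 */
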
LEAF(sk_load_word)
	is_offset_negative(word)
FEXPORT(sk_load_word_positive)
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	/* Let the assembler fill the load delay slot if the CPU needs one */
	.set	reorder
	lw	$r_A, 0(t1)
	.set	noreorder
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_A, 24
	srl	t1, $r_A, 24
	srl	t2, $r_A, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_A, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	or	$r_A, t0, t1
# endif
#endif
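	/*
	 * Byte-order example (illustrative): if the packet bytes at the
	 * load address are 11 22 33 44, a little-endian lw yields
	 * $r_A = 0x44332211; wsbh gives 0x33441122 and rotr by 16 gives
	 * the network-order 0x11223344. The shift/mask fallback above
	 * computes the same result on pre-R2 cores.
	 */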
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
FEXPORT(sk_load_half_positive)
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lhu	$r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	$r_A, $r_A
# else
	sll	t0, $r_A, 8
	srl	t1, $r_A, 8
	andi	t0, t0, 0xff00
	or	$r_A, t0, t1
# endif
#endif
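	/*
	 * Similarly (illustrative): packet bytes 11 22 loaded with a
	 * little-endian lhu give $r_A = 0x2211; wsbh (or the shift/mask
	 * fallback above) restores the network-order 0x1122.
	 */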
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
FEXPORT(sk_load_byte_positive)
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lbu	$r_A, 0(t1)
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates that the caller reserves four stack slots for the
 * argument registers in case the callee needs to use them. Even though
 * we do not use those slots ourselves, we must allocate them to stay
 * ABI compliant, since the callee is entitled to spill its arguments
 * there. We allocate two more slots for $r_ra and our destination
 * buffer (*to).
 *
 * n64 differs: the callee, not the caller, allocates space to preserve
 * its arguments. So on 64-bit kernels the four argument slots are not
 * strictly needed, but keeping the same frame layout is harmless.
 *
 * The destination buffer (*to) is read back into r_s0.
 */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
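/*
 * Frame layout sketch (the 6 * SZREG frame built by
 * bpf_slow_path_common below):
 *
 *	(5 * SZREG)($r_sp)	saved $r_ra
 *	(4 * SZREG)($r_sp)	destination slot, passed to the callee as a2
 *	(0..3) * SZREG		o32 argument save area for the callee
 *
 * On big-endian kernels DS_OFFSET points a2 at the last SIZE bytes of
 * the 32-bit word that INT_L reads back below, so the copied value
 * lands in the low-order bits; the slot is zeroed beforehand.
 */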
#define bpf_slow_path_common(SIZE)				\
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
	sltu		$r_s0, offset, $r_s1;		\
	beqz		$r_s0, fault;			\
	/* Load 4th argument in DS */			\
	LONG_ADDIU	a3, zero, SIZE;			\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);	\
	PTR_LA		t0, skb_copy_bits;		\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);	\
	/* Assign low slot to a2 */			\
	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE);	\
	jalr		t0;				\
	/* Reset our destination slot (executes in the DS, which is fine) */ \
	INT_S		zero, (4 * SZREG)($r_sp);	\
	/*						\
	 * skb_copy_bits returns 0 on success and -EFAULT	\
	 * on error. Our data live in a2. Do not bother with	\
	 * our data if an error has been returned.	\
	 */						\
	/* Restore our frame */				\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);	\
	INT_L		$r_s0, (4 * SZREG)($r_sp);	\
	bltz		v0, fault;			\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;	\
	move		$r_ret, zero;			\

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	wsbh	t0, $r_s0
	jr	$r_ra
	 rotr	$r_A, t0, 16
# else
	sll	t0, $r_s0, 24
	srl	t1, $r_s0, 24
	srl	t2, $r_s0, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_s0, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif

	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	jr	$r_ra
	 wsbh	$r_A, $r_s0
# else
	sll	t0, $r_s0, 8
	andi	t1, $r_s0, 0xff00
	andi	t0, t0, 0xff00
	srl	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif

	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	 move	$r_A, $r_s0

	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
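
/*
 * Semantics sketch: negative offsets below SKF_LL_OFF (-0x200000) are
 * invalid and fault immediately. Offsets in [SKF_LL_OFF, 0) are the
 * special cBPF ancillary offsets, resolved by
 * bpf_internal_load_pointer_neg_helper() relative to the link-layer or
 * network header; see the SKF_* constants in linux/filter.h.
 */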
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *					      int k, unsigned int size)
 *
 * See above (bpf_slow_path_common) for the ABI restrictions.
 */
#define bpf_negative_common(SIZE)				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);	\
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp);	\
	jalr		t0;				\
	/* Load 3rd argument in DS */			\
	li		a2, SIZE;			\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);	\
	/* Check return pointer */			\
	beqz		v0, fault;			\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;	\
	/* Preserve our pointer */			\
	move		$r_s0, v0;			\
	/* Set return value */				\
	move		$r_ret, zero;			\

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	 lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	 lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	 lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

fault:
	jr	$r_ra
	 addiu	$r_ret, zero, 1
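
/*
 * Note (an observation about the callers, not something enforced
 * here): the JIT-generated filter body is expected to test r_ret
 * after each helper call and abort the filter, returning 0, when it
 * is non-zero, per the ABI comment at the top of this file.
 */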