/* blob: 8c83f4b8eb15a7c314c10f9b44963b2220672d25 */
#include <asm/ptrace.h>

#include "bpf_jit.h"

/* 32-bit vs 64-bit ABI differences, selected at build time.
 *
 * SAVE_SZ:	minimum stack frame for a 'save' instruction (register
 *		window save area): 176 bytes on sparc64, 96 on sparc32.
 * SCRATCH_OFF:	offset of the scratch slot in a frame; on sparc64 the
 *		stack pointer is biased by STACK_BIAS, which must be
 *		added back to reach real memory.
 * BE_PTR:	branch-if-equal for a pointer-width comparison (%xcc on
 *		64-bit, predicted not-taken since it guards the error
 *		path).
 * SIGN_EXTEND:	sign-extend a 32-bit value held in a 64-bit register
 *		(sra by 0); expands to nothing on 32-bit.
 */
#ifdef CONFIG_SPARC64
#define SAVE_SZ		176
#define SCRATCH_OFF	STACK_BIAS + 128
#define BE_PTR(label)	be,pn %xcc, label
#define SIGN_EXTEND(reg)	sra reg, 0, reg
#else
#define SAVE_SZ		96
#define SCRATCH_OFF	72
#define BE_PTR(label)	be label
#define SIGN_EXTEND(reg)
#endif

/* Most negative packet offset the classic-BPF ABI permits. */
#define SKF_MAX_NEG_OFF	(-0x200000)	/* SKF_LL_OFF from filter.h */
18
	.text

/* bpf_jit_load_word: load a 32-bit word from the packet into r_A.
 *
 * In:  r_SKB_DATA = packet data pointer, r_HEADLEN = linear data
 *      length, r_OFF = requested offset (signed).
 * Out: r_A = word at r_SKB_DATA + r_OFF, assembled high-byte-first
 *      on the unaligned path (matches big-endian sparc 'ld').
 *
 * NOTE: every SPARC branch has a delay slot — the instruction written
 * after the branch executes together with it.
 */
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg		/* signed: offset < 0 */
	 nop					/* delay slot */
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP		/* bytes available at offset */
	cmp	r_TMP, 3
	ble	bpf_slow_path_word		/* < 4 linear bytes -> slow path */
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: load address */
	andcc	r_TMP, 3, %g0			/* 4-byte aligned? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A			/* aligned load in delay slot */
load_word_unaligned:
	/* Assemble the word one byte at a time, high byte first. */
	ldub	[r_TMP + 0x0], r_OFF		/* r_OFF reused as accumulator */
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A		/* last byte merged in delay slot */
48
/* bpf_jit_load_half: load a 16-bit halfword from the packet into r_A.
 *
 * Same contract as bpf_jit_load_word but for 2 bytes: negative offsets
 * divert to the negative slow path, offsets with fewer than 2 linear
 * bytes to the copy slow path; unaligned addresses are assembled
 * byte-by-byte, high byte first.
 */
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg		/* signed: offset < 0 */
	 nop					/* delay slot */
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP		/* bytes available at offset */
	cmp	r_TMP, 1
	ble	bpf_slow_path_half		/* < 2 linear bytes -> slow path */
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: load address */
	andcc	r_TMP, 1, %g0			/* 2-byte aligned? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A			/* aligned load in delay slot */
load_half_unaligned:
	ldub	[r_TMP + 0x0], r_OFF		/* high byte */
	ldub	[r_TMP + 0x1], r_TMP2		/* low byte */
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A		/* combine in delay slot */
71
/* bpf_jit_load_byte: load one packet byte into r_A.
 * No alignment concerns; only the range checks matter.
 */
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg		/* signed: offset < 0 */
	 nop					/* delay slot */
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte		/* offset beyond linear data */
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A	/* load in delay slot */
84
/* bpf_jit_load_byte_msh: classic-BPF LDX|MSH helper.
 * Computes r_X = (packet_byte(r_OFF) & 0xf) << 2 — i.e. four times the
 * byte's low nibble (the IP header-length idiom; offset semantics come
 * from the BPF program, so that interpretation is the caller's).
 * r_A is left untouched on this path.
 */
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg	/* signed: offset < 0 */
	 nop					/* delay slot */
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh		/* offset beyond linear data */
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF	/* r_OFF now holds the byte */
	and	r_OFF, 0xf, r_OFF		/* low nibble */
	retl
	 sll	r_OFF, 2, r_X			/* delay slot: r_X = nibble * 4 */
99
/* Copy LEN bytes at offset r_OFF into the frame's scratch slot via
 * skb_copy_bits().  A fresh register window ('save') shields the JIT's
 * register state from the C call; %i0 is the caller's %o0, which is
 * presumed to hold the skb — TODO confirm against bpf_jit.h.  The call's
 * delay slot sets the length argument.  'restore' does not modify the
 * condition codes, so the 'cmp' on skb_copy_bits()'s return value
 * (negative on failure) can be branched on by the code that follows
 * the macro expansion.
 */
#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;
109
/* Word fallback: copy 4 bytes through the scratch slot.  After
 * 'restore', %sp + SCRATCH_OFF names the same slot that was
 * %fp + SCRATCH_OFF inside the window.  On skb_copy_bits() failure
 * (negative return) branch to bpf_error; the load in the delay slot
 * still executes then, but its result is discarded.
 */
bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error			/* copy failed */
	 ld	[%sp + SCRATCH_OFF], r_A	/* delay slot: fetch word */
	retl
	 nop
/* Halfword fallback: copy 2 bytes through the scratch slot; see
 * bpf_slow_path_word for the %sp/%fp and delay-slot notes.
 */
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error			/* copy failed */
	 lduh	[%sp + SCRATCH_OFF], r_A	/* delay slot: fetch halfword */
	retl
	 nop
/* Byte fallback: copy 1 byte through the scratch slot; see
 * bpf_slow_path_word for the %sp/%fp and delay-slot notes.
 */
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error			/* copy failed */
	 ldub	[%sp + SCRATCH_OFF], r_A	/* delay slot: fetch byte */
	retl
	 nop
/* MSH fallback: copy 1 byte through the scratch slot, then compute
 * r_X = (byte & 0xf) << 2 exactly like the fast path.
 *
 * FIX: the byte must be reloaded into r_OFF, not r_A.  The original
 * loaded it into r_A — clobbering the BPF accumulator, which LDX|MSH
 * must leave intact — and then masked r_OFF, which still held the
 * packet *offset*, so r_X was derived from stale data.  Both the fast
 * path and the negative-offset path load the byte into r_OFF.
 */
bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	bl	bpf_error			/* copy failed */
	 ldub	[%sp + SCRATCH_OFF], r_OFF	/* delay slot: fetch byte */
	and	r_OFF, 0xf, r_OFF		/* low nibble */
	retl
	 sll	r_OFF, 2, r_X			/* delay slot: r_X = nibble * 4 */
135
/* Negative-offset slow path core: ask
 * bpf_internal_load_pointer_neg_helper(skb, off, LEN) for a pointer to
 * LEN readable bytes.  %o1 is sign-extended on sparc64 because r_OFF
 * holds a (negative) 32-bit offset.  A NULL return means the offset is
 * not accessible -> bpf_error; BE_PTR compares at pointer width.  The
 * pointer is parked in r_TMP, which must survive 'restore' (presumably
 * a %g register — see bpf_jit.h; TODO confirm).  'restore' sits in the
 * branch delay slot so the window is popped on both paths.
 */
#define bpf_negative_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	SIGN_EXTEND(%o1);		\
	call	bpf_internal_load_pointer_neg_helper;	\
	 mov	(LEN), %o2;		\
	mov	%o0, r_TMP;		\
	cmp	%o0, 0;			\
	BE_PTR(bpf_error);		\
	 restore;
147
/* Negative-offset word load.  Offsets below SKF_MAX_NEG_OFF are
 * invalid.  A single 'sethi' builds the constant because the low 10
 * bits of -0x200000 are zero.
 */
bpf_slow_path_word_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = SKF_MAX_NEG_OFF */
	cmp	r_OFF, r_TMP
	bl	bpf_error			/* offset too negative */
	 nop					/* delay slot */
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0			/* returned pointer aligned? */
	bne	load_word_unaligned		/* reuse byte-wise assembler */
	 nop
	retl
	 ld	[r_TMP], r_A			/* aligned load in delay slot */
161
/* Negative-offset halfword load; mirrors bpf_slow_path_word_neg. */
bpf_slow_path_half_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = SKF_MAX_NEG_OFF */
	cmp	r_OFF, r_TMP
	bl	bpf_error			/* offset too negative */
	 nop					/* delay slot */
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0			/* returned pointer aligned? */
	bne	load_half_unaligned		/* reuse byte-wise assembler */
	 nop
	retl
	 lduh	[r_TMP], r_A			/* aligned load in delay slot */
175
/* Negative-offset byte load; no alignment concerns for one byte. */
bpf_slow_path_byte_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = SKF_MAX_NEG_OFF */
	cmp	r_OFF, r_TMP
	bl	bpf_error			/* offset too negative */
	 nop					/* delay slot */
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A			/* load in delay slot */
186
/* Negative-offset MSH load: r_X = (byte & 0xf) << 2, same computation
 * as the positive-offset fast path; r_A is not touched.
 */
bpf_slow_path_byte_msh_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = SKF_MAX_NEG_OFF */
	cmp	r_OFF, r_TMP
	bl	bpf_error			/* offset too negative */
	 nop					/* delay slot */
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF			/* r_OFF now holds the byte */
	and	r_OFF, 0xf, r_OFF		/* low nibble */
	retl
	 sll	r_OFF, 2, r_X			/* delay slot: r_X = nibble * 4 */
199
bpf_error:
	/* Make the JIT program return zero.  The JIT epilogue
	 * stores away the original %o7 into r_saved_O7.  The
	 * normal leaf function return is to use "retl" which
	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value = 0 */