/* bpf_jit.S: Packet/header access helper functions
 * for PPC64 BPF compiler.
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <asm/ppc_asm.h>
#include "bpf_jit32.h"

/*
 * All of these routines are called directly from generated code,
 * whose register usage is:
 *
 * r3		skb
 * r4,r5	A,X
 * r6		*** address parameter to helper ***
 * r7-r10	scratch
 * r14		skb->data
 * r15		skb headlen
 * r16-31	M[]
 */
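/*
 * For example (a sketch only; the exact sequence is chosen by the JIT),
 * a BPF_LD | BPF_W | BPF_ABS instruction with constant offset K becomes
 * roughly:
 *
 *	li	r6, K		- packet offset in the address parameter
 *	bl	sk_load_word	- word result in r4 (A); cr0 = LT on error
 */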

/*
 * To consider: These helpers are so small it could be better to just
 * generate them inline.  Inline code can do the simple headlen check
 * then branch directly to slow_path_XXX if required.  (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and bllr.)
 */
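/*
 * A hypothetical call site under that scheme (sketch only: no
 * slow_path_generic exists today, and the register choices are
 * illustrative) might be:
 *
 *	mtlr	r_scratch2	- spare GPR pre-loaded with helper address
 *	li	r_scratch1, 4	- pass the access size as an argument
 *	bllr			- call; LR receives the return address
 */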
	.globl	sk_load_word
sk_load_word:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_word_neg
	.globl	sk_load_word_positive_offset
sk_load_word_positive_offset:
	/* Are we accessing past headlen? */
	subi	r_scratch1, r_HL, 4
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_word
	/* Nope, just hitting the header.  cr0 here is eq or gt! */
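	/* BPF loads are big-endian (network byte order); byte-reverse on LE. */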
#ifdef __LITTLE_ENDIAN__
	lwbrx	r_A, r_D, r_addr
#else
	lwzx	r_A, r_D, r_addr
#endif
	blr	/* Return success, cr0 != LT */

	.globl	sk_load_half
sk_load_half:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_half_neg
	.globl	sk_load_half_positive_offset
sk_load_half_positive_offset:
	subi	r_scratch1, r_HL, 2
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
	lhbrx	r_A, r_D, r_addr
#else
	lhzx	r_A, r_D, r_addr
#endif
	blr

	.globl	sk_load_byte
sk_load_byte:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_neg
	.globl	sk_load_byte_positive_offset
sk_load_byte_positive_offset:
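	/* A 1-byte access is in-header iff addr < headlen, hence ble, not blt. */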
	PPC_LCMP	r_HL, r_addr
	ble	bpf_slow_path_byte
	lbzx	r_A, r_D, r_addr
	blr

/*
 * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
 * r_addr is the offset value
 */
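/*
 * The rlwinm below computes 4 * (byte & 0xf): rotate left by 2 and mask
 * so that X = (byte & 0xf) << 2, i.e. the scaled low nibble.
 */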
	.globl	sk_load_byte_msh
sk_load_byte_msh:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_msh_neg
	.globl	sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
	PPC_LCMP	r_HL, r_addr
	ble	bpf_slow_path_byte_msh
	lbzx	r_X, r_D, r_addr
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
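/*
 * Roughly (an informal summary, not normative): A, X and the skb
 * pointer are stashed in the existing local/parameter save areas, r5 is
 * pointed at a scratch buffer at BPF_PPC_STACK_BASIC+(2*REG_SZ), and a
 * fresh frame is pushed around the call.  The nop after the bl leaves
 * room for the linker to insert a TOC restore on ppc64 when the call
 * goes via a stub.
 */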
#define bpf_slow_path_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);		\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r6, SIZE;					\
	bl	skb_copy_bits;					\
	nop;							\
	/* R3 = 0 on success */					\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPI	r3, 0;					\
	blt	bpf_error;	/* cr0 = LT */			\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	/* Great success! */

bpf_slow_path_word:
	bpf_slow_path_common(4)
	/* Data value is on stack, and cr0 != LT */
	lwz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_half:
	bpf_slow_path_common(2)
	lhz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	lbz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	lbz	r_X, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

/* Call out to bpf_internal_load_pointer_neg_helper:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
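/*
 * bpf_internal_load_pointer_neg_helper returns a pointer to the
 * requested bytes (possibly into a scratch buffer), or NULL on failure.
 * Offsets below SKF_LL_OFF (-0x200000, the "lis ...,-32" seen below)
 * are rejected in the bpf_slow_path_*_neg stubs before this is used.
 */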
#define sk_negative_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r5, SIZE;					\
	bl	bpf_internal_load_pointer_neg_helper;		\
	nop;							\
	/* R3 != 0 on success */				\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPLI	r3, 0;					\
	beq	bpf_error_slow;	/* cr0 = EQ */			\
	mr	r_addr, r3;					\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	/* Great success! */

bpf_slow_path_word_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_word_negative_offset
sk_load_word_negative_offset:
	sk_negative_common(4)
	lwz	r_A, 0(r_addr)
	blr

bpf_slow_path_half_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_half_negative_offset
sk_load_half_negative_offset:
	sk_negative_common(2)
	lhz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_negative_offset
sk_load_byte_negative_offset:
	sk_negative_common(1)
	lbz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_msh_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
	sk_negative_common(1)
	lbz	r_X, 0(r_addr)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

bpf_error_slow:
	/* fabricate a cr0 = lt */
	li	r_scratch1, -1
	PPC_LCMPI	r_scratch1, 0
bpf_error:
	/* Entered with cr0 = lt */
	li	r3, 0
	/* Generated code will 'blt epilogue', returning 0. */
	blr