/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/isa-rev.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
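/*
 * A rough sketch of the call sequence the JIT (bpf_jit.c) is expected
 * to emit around these helpers -- illustrative only, not the emitted
 * code verbatim:
 *
 *	move	a1, <imm>		# or imm + X for indirect loads
 *	jalr	sk_load_word		# a0/r_skb already holds *skb
 *	 nop
 *	bnez	$r_ret, <abort filter>	# non-zero r_ret means the load failed
 *	# on success the (byte-swapped) value is available in r_A
 */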

#define skb	a0
#define offset	a1
#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */
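/*
 * For reference, include/uapi/linux/filter.h defines SKF_NET_OFF as
 * -0x100000 and SKF_LL_OFF as -0x200000: negative offsets in that range
 * address the network/link-layer headers instead of the packet payload.
 */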

/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

#define is_offset_negative(TYPE)				\
	/* If offset is negative we have more work to do */	\
	slti	t0, offset, 0;					\
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE)				\
	/* Reading from header? */				\
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
	slt	t0, $r_s0, offset;				\
	bgtz	t0, bpf_slow_path_##TYPE;			\

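/*
 * Taken together, the two macros above keep us on the fast path only
 * when 0 <= offset <= skb_headlen - SIZE, i.e. when the whole value
 * lies in the linear (header) part of the skb; everything else drops
 * into the slow paths below.
 */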
LEAF(sk_load_word)
	is_offset_negative(word)
FEXPORT(sk_load_word_positive)
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	.set	reorder
	lw	$r_A, 0(t1)
	.set	noreorder
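	/*
	 * Packet data is in network (big-endian) byte order, so
	 * little-endian CPUs must byte-swap the loaded value. On
	 * MIPS32/64 R2 and later, wsbh (swap bytes within halfwords)
	 * followed by rotr 16 performs a full 32-bit swap; older ISAs
	 * assemble the swap from shifts and masks.
	 */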
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_A, 24
	srl	t1, $r_A, 24
	srl	t2, $r_A, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_A, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
FEXPORT(sk_load_half_positive)
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lhu	$r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	$r_A, $r_A
# else
	sll	t0, $r_A, 8
	srl	t1, $r_A, 8
	andi	t0, t0, 0xff00
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
FEXPORT(sk_load_byte_positive)
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lbu	$r_A, 0(t1)
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 spaces for argument registers in case
 * the callee needs to use them. Even though we don't care about
 * the argument registers ourselves, we need to allocate that space
 * to remain ABI compliant since the callee may want to use that space.
 * We also allocate 2 more spaces for $r_ra and our return register (*to).
 *
 * n64 is a bit different. The *caller* will allocate the space to preserve
 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
 * good reason but it does not matter that much really.
 *
 * (void *to) is returned in r_s0
 *
 */
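/*
 * Resulting 6 * SZREG stack frame, as laid out by the code below:
 *
 *	5 * SZREG:   saved $r_ra
 *	4 * SZREG:   zero-initialised destination word for skb_copy_bits
 *	             (reloaded into $r_s0 afterwards)
 *	0-3 * SZREG: o32 argument save area, owned by the callee
 */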
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
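/*
 * skb_copy_bits() writes only SIZE bytes at a2, while we reload a full
 * zero-initialised word with INT_L. On big-endian kernels the copied
 * bytes must therefore sit at the end of that word so they land in its
 * least significant bits, hence the (4 - SIZE) adjustment above.
 */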
#define bpf_slow_path_common(SIZE)				\
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
	sltu		$r_s0, offset, $r_s1;			\
	beqz		$r_s0, fault;				\
	/* Load 4th argument in DS */				\
	LONG_ADDIU	a3, zero, SIZE;				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
	PTR_LA		t0, skb_copy_bits;			\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
	/* Assign low slot to a2 */				\
	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE);		\
	jalr		t0;					\
	/* Reset our destination slot (DS but it's ok) */	\
	INT_S		zero, (4 * SZREG)($r_sp);		\
	/*							\
	 * skb_copy_bits returns 0 on success and -EFAULT	\
	 * on error. Our data live in a2. Do not bother with	\
	 * our data if an error has been returned.		\
	 */							\
	/* Restore our frame */					\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
	INT_L		$r_s0, (4 * SZREG)($r_sp);		\
	bltz		v0, fault;				\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
	move		$r_ret, zero;				\

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_s0
	jr	$r_ra
	 rotr	$r_A, t0, 16
# else
	sll	t0, $r_s0, 24
	srl	t1, $r_s0, 24
	srl	t2, $r_s0, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_s0, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif

	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	jr	$r_ra
	 wsbh	$r_A, $r_s0
# else
	sll	t0, $r_s0, 8
	andi	t1, $r_s0, 0xff00
	andi	t0, t0, 0xff00
	srl	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif

	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	 move	$r_A, $r_s0

	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
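/*
 * Offsets below SKF_LL_OFF are not valid special offsets, so the macro
 * above sends them straight to the fault path. Offsets in
 * [SKF_LL_OFF, 0) are resolved through
 * bpf_internal_load_pointer_neg_helper() below.
 */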
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *					      int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
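/*
 * The helper returns a pointer into the skb data (or NULL when the
 * negative offset cannot be resolved); a NULL return is turned into a
 * fault below, otherwise the pointer is preserved in $r_s0.
 */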
#define bpf_negative_common(SIZE)				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
	jalr		t0;					\
	li		a2, SIZE;				\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
	/* Check return pointer */				\
	beqz		v0, fault;				\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
	/* Preserve our pointer */				\
	move		$r_s0, v0;				\
	/* Set return value */					\
	move		$r_ret, zero;				\

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	 lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	 lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	 lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

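/*
 * Common failure path: a non-zero $r_ret tells the JITed filter that
 * the load failed, at which point the filter is expected to bail out
 * and return 0.
 */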
fault:
	jr	$r_ra
	 addiu	$r_ret, zero, 1