// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
.syntax unified




#if defined(__thumb2__)
.thumb
#else
.code 32
#endif

.text


.align 7 @ totally strategic alignment
_vpaes_consts:
Lk_mc_forward: @ mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: @ mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: @ sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
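
@ Each Lk_sr row i above applies the byte permutation j -> ((1 + 4*i) * j) % 16;
@ row 1 is plain ShiftRows for the column-major state layout vpaes uses, and
@ the other rows compose it with the word rotation tracked in r11/r8. A
@ minimal sketch (an editorial illustration, not part of the generated
@ output) that regenerates the rows:
@
@ for i in range(4):
@     row = [((1 + 4*i) * j) % 16 for j in range(16)]
@     lo = sum(b << (8*k) for k, b in enumerate(row[:8]))
@     hi = sum(b << (8*k) for k, b in enumerate(row[8:]))
@     print(".quad 0x%016X, 0x%016X" % (lo, hi))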

@
@ "Hot" constants
@
Lk_inv: @ inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: @ input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: @ sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: @ sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: @ sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
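
@ Each 32-byte constant above is a pair of 16-byte tables: the first table is
@ indexed by a byte's low nibble and the second by its high nibble, and the
@ two lookups are XORed (a pair of vtbl.8 ops plus a veor in the code below).
@ A hedged sketch of one such lookup, in the style of the conversion script
@ attached to Lk_opt_then_skew further down (editorial illustration only):
@
@ def apply_pair(lo_q0, lo_q1, hi_q0, hi_q1, b):
@     lo = lo_q0 | (lo_q1 << 64)     # table indexed by the low nibble
@     hi = hi_q0 | (hi_q1 << 64)     # table indexed by the high nibble
@     return ((lo >> (8 * (b & 0x0F))) ^ (hi >> (8 * (b >> 4)))) & 0xFF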

.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2

.align 6
@@
@@ _aes_preheat
@@
@@ Fills q9-q15 as specified below.
@@
#ifdef __thumb2__
.thumb_func _vpaes_preheat
#endif
.align 4
_vpaes_preheat:
    adr r10, Lk_inv
    vmov.i8 q9, #0x0f @ Lk_s0F
    vld1.64 {q10,q11}, [r10]! @ Lk_inv
    add r10, r10, #64 @ Skip Lk_ipt, Lk_sbo
    vld1.64 {q12,q13}, [r10]! @ Lk_sb1
    vld1.64 {q14,q15}, [r10] @ Lk_sb2
    bx lr

@@
@@ _aes_encrypt_core
@@
@@ AES-encrypt q0.
@@
@@ Inputs:
@@   q0 = input
@@   q9-q15 as in _vpaes_preheat
@@   [r2] = scheduled keys
@@
@@ Output in q0
@@ Clobbers q1-q5, r8-r11
@@ Preserves q6-q8 so you get some local vectors
@@
@@
#ifdef __thumb2__
.thumb_func _vpaes_encrypt_core
#endif
.align 4
_vpaes_encrypt_core:
    mov r9, r2
    ldr r8, [r2,#240] @ pull rounds
    adr r11, Lk_ipt
    @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    vld1.64 {q2, q3}, [r11]
    adr r11, Lk_mc_forward+16
    vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
    vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
    vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
    vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1
    vtbl.8 d3, {q2}, d3
    vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2
    vtbl.8 d5, {q3}, d1
    veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
    veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0

    @ .Lenc_entry ends with a bne instruction which is normally paired with
    @ subs in .Lenc_loop.
    tst r8, r8
    b Lenc_entry

.align 4
Lenc_loop:
    @ middle of middle round
    add r10, r11, #0x40
    vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    vtbl.8 d9, {q13}, d5
    vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
    vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    vtbl.8 d1, {q12}, d7
    veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    vtbl.8 d11, {q15}, d5
    veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    vtbl.8 d5, {q14}, d7
    vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
    vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    vtbl.8 d7, {q0}, d3
    veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    @ Write to q5 instead of q0, so the table and destination registers do
    @ not overlap.
    vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    vtbl.8 d11, {q0}, d9
    veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    vtbl.8 d9, {q3}, d3
    @ Here we restore the original q0/q5 usage.
    veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4
    veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    subs r8, r8, #1 @ nr--

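@ With A the S-box output and B, C, D its rotations by one, two and three
@ bytes within each column (via Lk_mc_forward / Lk_mc_backward), the loop
@ above assembles 2A+3B+C+D, which is exactly textbook MixColumns. A hedged
@ Python check of that identity (xtime, rot and mix_column are editorial
@ helpers, not part of this file):
@
@ def xtime(b):                      # multiply by 2 in GF(2^8)
@     b <<= 1
@     return (b ^ 0x1B) & 0xFF if b & 0x100 else b
@ def rot(col, n):                   # rotate a 4-byte column by n
@     return col[n:] + col[:n]
@ def mix_column(col):               # textbook MixColumns of one column
@     return [xtime(col[i]) ^ xtime(col[(i+1) % 4]) ^ col[(i+1) % 4] ^
@             col[(i+2) % 4] ^ col[(i+3) % 4] for i in range(4)]
@ import random
@ a = [random.randrange(256) for _ in range(4)]
@ b = rot(a, 1)                      # B; C = rot(a, 2); D = rot(a, 3)
@ out = [xtime(a[i]) ^ xtime(b[i]) ^ b[i] ^ rot(a, 2)[i] ^ rot(a, 3)[i]
@        for i in range(4)]          # 2A + 3B + C + D
@ assert out == mix_column(a)
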
Lenc_entry:
    @ top of round
    vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
    vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
    vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    vtbl.8 d11, {q11}, d3
    veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    vtbl.8 d7, {q10}, d1
    vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    vtbl.8 d9, {q10}, d3
    veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    vtbl.8 d5, {q10}, d7
    vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    vtbl.8 d7, {q10}, d9
    veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
    bne Lenc_loop

    @ middle of last round
    add r10, r11, #0x80

    adr r11, Lk_sbo
    @ Read to q1 instead of q4, so the vtbl.8 instruction below does not
    @ overlap table and destination registers.
    vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
    vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot Lk_sbo+16
    vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    vtbl.8 d9, {q1}, d5
    vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
    @ Write to q2 instead of q0 below, to avoid overlapping table and
    @ destination registers.
    vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    vtbl.8 d5, {q0}, d7
    veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    @ Here we restore the original q0/q2 usage.
    vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0
    vtbl.8 d1, {q2}, d3
    bx lr

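@ Presumed C prototype, following the convention used for
@ GFp_vpaes_encrypt_key_to_bsaes below (r0 = in, r1 = out, r2 = key):
@ void GFp_vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);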
.globl _GFp_vpaes_encrypt
.private_extern _GFp_vpaes_encrypt
#ifdef __thumb2__
.thumb_func _GFp_vpaes_encrypt
#endif
.align 4
_GFp_vpaes_encrypt:
    @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
    @ alignment.
    stmdb sp!, {r7,r8,r9,r10,r11,lr}
    @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
    vstmdb sp!, {d8,d9,d10,d11}

    vld1.64 {q0}, [r0]
    bl _vpaes_preheat
    bl _vpaes_encrypt_core
    vst1.64 {q0}, [r1]

    vldmia sp!, {d8,d9,d10,d11}
    ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@                                                    @@
@@                  AES key schedule                  @@
@@                                                    @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.

@
@ Key schedule constants
@

.align 4
_vpaes_key_consts:
Lk_rcon: @ rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

Lk_opt: @ output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: @ deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

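@ Per the comments in GFp_vpaes_encrypt_key_to_bsaes below, Lk_opt inverts
@ the Lk_ipt input transform. A quick hedged check, reusing u64s_to_u128 and
@ apply_table from the conversion script attached to Lk_opt_then_skew further
@ down (editorial illustration only):
@
@ ipt = [u64s_to_u128(0xC2B2E8985A2A7000, 0xCABAE09052227808),
@        u64s_to_u128(0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81)]
@ opt = [u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@        u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0)]
@ for x in range(256):
@     assert apply_table(opt, apply_table(ipt, x)) == x
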
#ifdef __thumb2__
.thumb_func _vpaes_key_preheat
#endif
.align 4
_vpaes_key_preheat:
    adr r11, Lk_rcon
    vmov.i8 q12, #0x5b @ Lk_s63
    adr r10, Lk_inv @ Must be aligned to 8 mod 16.
    vmov.i8 q9, #0x0f @ Lk_s0F
    vld1.64 {q10,q11}, [r10] @ Lk_inv
    vld1.64 {q8}, [r11] @ Lk_rcon
    bx lr


#ifdef __thumb2__
.thumb_func _vpaes_schedule_core
#endif
.align 4
_vpaes_schedule_core:
    @ We only need to save lr, but ARM requires an 8-byte stack alignment,
    @ so save an extra register.
    stmdb sp!, {r3,lr}

    bl _vpaes_key_preheat @ load the tables

    adr r11, Lk_ipt @ Must be aligned to 8 mod 16.
    vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)

    @ input transform
    @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
    @ overlap table and destination.
    vmov q4, q0 @ vmovdqa %xmm0, %xmm3
    bl _vpaes_schedule_transform
    adr r10, Lk_sr @ Must be aligned to 8 mod 16.
    vmov q7, q0 @ vmovdqa %xmm0, %xmm7

    add r8, r8, r10

    @ encrypting, output zeroth round key after transform
    vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx)

    @ *ring*: Decryption removed.

Lschedule_go:
    cmp r1, #192 @ cmp $192, %esi
    bhi Lschedule_256
    @ 128: fall through

@@
@@ .schedule_128
@@
@@ 128-bit specific part of key schedule.
@@
@@ This schedule is really simple, because all its parts
@@ are accomplished by the subroutines.
@@
Lschedule_128:
    mov r0, #10 @ mov $10, %esi

Loop_schedule_128:
    bl _vpaes_schedule_round
    subs r0, r0, #1 @ dec %esi
    beq Lschedule_mangle_last
    bl _vpaes_schedule_mangle @ write output
    b Loop_schedule_128

@@
@@ .aes_schedule_256
@@
@@ 256-bit specific part of key schedule.
@@
@@ The structure here is very similar to the 128-bit
@@ schedule, but with an additional "low side" in
@@ q6. The low side's rounds are the same as the
@@ high side's, except no rcon and no rotation.
@@
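@@ For reference, a hedged Python sketch of the textbook AES-256 expansion
@@ this maps onto (sub_word, rot_word and xtime are assumed helpers, not
@@ part of this file); the i % 8 == 0 step is the "high" round and the
@@ i % 8 == 4 step the "low" round described above:
@@
@@ def expand_256(key_words):        # 8 input words -> 60 round-key words
@@     w, rcon = list(key_words), 1
@@     for i in range(8, 60):
@@         t = w[i-1]
@@         if i % 8 == 0:            # high: rotate, sub, add round constant
@@             t = sub_word(rot_word(t)) ^ rcon
@@             rcon = xtime(rcon)
@@         elif i % 8 == 4:          # low: sub only, no rcon, no rotation
@@             t = sub_word(t)
@@         w.append(w[i-8] ^ t)
@@     return w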
.align 4
Lschedule_256:
    vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
    bl _vpaes_schedule_transform @ input transform
    mov r0, #7 @ mov $7, %esi

Loop_schedule_256:
    bl _vpaes_schedule_mangle @ output low result
    vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6

    @ high round
    bl _vpaes_schedule_round
    subs r0, r0, #1 @ dec %esi
    beq Lschedule_mangle_last
    bl _vpaes_schedule_mangle

    @ low round. swap xmm7 and xmm6
    vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
    vmov.i8 q4, #0
    vmov q5, q7 @ vmovdqa %xmm7, %xmm5
    vmov q7, q6 @ vmovdqa %xmm6, %xmm7
    bl _vpaes_schedule_low_round
    vmov q7, q5 @ vmovdqa %xmm5, %xmm7

    b Loop_schedule_256

@@
@@ .aes_schedule_mangle_last
@@
@@ Mangler for last round of key schedule
@@ Mangles q0
@@ when encrypting, outputs out(q0) ^ 0x63
@@ when decrypting, outputs unskew(q0)
@@
@@ Always called right before return... jumps to cleanup and exits
@@
.align 4
Lschedule_mangle_last:
    @ schedule last round key from xmm0
    adr r11, Lk_deskew @ lea Lk_deskew(%rip),%r11 # prepare to deskew

    @ encrypting
    vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
    adr r11, Lk_opt @ lea Lk_opt(%rip), %r11 # prepare to output transform
    add r2, r2, #32 @ add $32, %rdx
    vmov q2, q0
    vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
    vtbl.8 d1, {q2}, d3

Lschedule_mangle_last_dec:
    sub r2, r2, #16 @ add $-16, %rdx
    veor q0, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm0
    bl _vpaes_schedule_transform @ output transform
    vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key

    @ cleanup
    veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
    veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
    veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
    veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
    veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
    veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
    veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
    veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
    ldmia sp!, {r3,pc} @ return


@@
@@ .aes_schedule_round
@@
@@ Runs one main round of the key schedule on q0, q7
@@
@@ Specifically, runs subbytes on the high dword of q0
@@ then rotates it by one byte and xors into the low dword of
@@ q7.
@@
@@ Adds rcon from low byte of q8, then rotates q8 for
@@ next rcon.
@@
@@ Smears the dwords of q7 by xoring the low into the
@@ second low, result into third, result into highest.
@@
@@ Returns results in q7 = q0.
@@ Clobbers q1-q4, r11.
@@
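@@ The smear is an XOR prefix-scan over the four dwords. A hedged Python
@@ sketch, viewing q7 as a 128-bit little-endian integer (editorial
@@ illustration only):
@@
@@ def smear(x):
@@     mask = (1 << 128) - 1
@@     x ^= (x << 32) & mask         # vext/vpslldq $4 + veor
@@     x ^= (x << 64) & mask         # vext/vpslldq $8 + veor
@@     return x                      # dword i = XOR of input dwords 0..i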
#ifdef __thumb2__
.thumb_func _vpaes_schedule_round
#endif
.align 4
_vpaes_schedule_round:
    @ extract rcon from xmm8
    vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
    vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1
    vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8
    veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7

    @ rotate
    vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
    vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0

    @ fall through...

    @ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
    @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
    @ We pin other values in _vpaes_key_preheat, so load them now.
    adr r11, Lk_sb1
    vld1.64 {q14,q15}, [r11]

    @ smear xmm7
    vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1
    veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
    vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4

    @ subbytes
    vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
    vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
    veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
    vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    vtbl.8 d5, {q11}, d3
    veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    vtbl.8 d7, {q10}, d1
    veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    vtbl.8 d9, {q10}, d3
    veor q7, q7, q12 @ vpxor Lk_s63(%rip), %xmm7, %xmm7
    vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
    vtbl.8 d7, {q10}, d7
    veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
    vtbl.8 d5, {q10}, d9
    veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
    veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
    vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
    vtbl.8 d9, {q15}, d7
    vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
    vtbl.8 d3, {q14}, d5
    veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output

    @ add in smeared stuff
    veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
    veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
    bx lr


@@
@@ .aes_schedule_transform
@@
@@ Linear-transform q0 according to tables at [r11]
@@
@@ Requires that q9 = 0x0F0F... as in preheat
@@ Output in q0
@@ Clobbers q1, q2, q14, q15
@@
#ifdef __thumb2__
.thumb_func _vpaes_schedule_transform
#endif
.align 4
_vpaes_schedule_transform:
    vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
                             @ vmovdqa 16(%r11), %xmm1 # hi
    vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
    vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
    vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
    vtbl.8 d5, {q14}, d3
    vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
    vtbl.8 d1, {q15}, d1
    veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
    bx lr


@@
@@ .aes_schedule_mangle
@@
@@ Mangles q0 from (basis-transformed) standard version
@@ to our version.
@@
@@ On encrypt,
@@    xor with 0x63
@@    multiply by circulant 0,1,1,1
@@    apply shiftrows transform
@@
@@ On decrypt,
@@    xor with 0x63
@@    multiply by "inverse mixcolumns" circulant E,B,D,9
@@    deskew
@@    apply shiftrows transform
@@
@@
@@ Writes out to [r2], and increments or decrements it
@@ Keeps track of round number mod 4 in r8
@@ Preserves q0
@@ Clobbers q1-q5
@@
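@@ In the standard basis, one column of the encrypt-side mangle is: XOR with
@@ 0x63, then multiply by the circulant (0,1,1,1), i.e. XOR the column's
@@ three rotations. (The code below works in the transformed basis, where
@@ the 0x63 becomes the 0x5b pinned in q12 and the rotations are the
@@ Lk_mc_forward permutes.) A hedged sketch of one column, before the final
@@ ShiftRows-style permute (editorial illustration only):
@@
@@ def mangle_enc_column(col):
@@     y = [b ^ 0x63 for b in col]
@@     rots = [y[n:] + y[:n] for n in (1, 2, 3)]
@@     return [rots[0][i] ^ rots[1][i] ^ rots[2][i] for i in range(4)]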
#ifdef __thumb2__
.thumb_func _vpaes_schedule_mangle
#endif
.align 4
_vpaes_schedule_mangle:
    tst r3, r3
    vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
    adr r11, Lk_mc_forward @ Must be aligned to 8 mod 16.
    vld1.64 {q5}, [r11] @ vmovdqa Lk_mc_forward(%rip),%xmm5

    @ encrypting
    @ Write to q2 so we do not overlap table and destination below.
    veor q2, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm4
    add r2, r2, #16 @ add $16, %rdx
    vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4
    vtbl.8 d9, {q2}, d11
    vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1
    vtbl.8 d3, {q4}, d11
    vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3
    vtbl.8 d7, {q1}, d11
    veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
    vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
    veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3

Lschedule_mangle_both:
    @ Write to q2 so table and destination do not overlap.
    vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
    vtbl.8 d5, {q3}, d3
    add r8, r8, #64-16 @ add $-16, %r8
    and r8, r8, #~(1<<6) @ and $0x30, %r8
    vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx)
    bx lr


.globl _GFp_vpaes_set_encrypt_key
.private_extern _GFp_vpaes_set_encrypt_key
#ifdef __thumb2__
.thumb_func _GFp_vpaes_set_encrypt_key
#endif
.align 4
_GFp_vpaes_set_encrypt_key:
    stmdb sp!, {r7,r8,r9,r10,r11, lr}
    vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

    lsr r9, r1, #5 @ shr $5,%eax
    add r9, r9, #5 @ add $5,%eax
    str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;

    mov r3, #0 @ mov $0,%ecx
    mov r8, #0x30 @ mov $0x30,%r8d
    bl _vpaes_schedule_core
    eor r0, r0, r0

    vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
    ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return


@ Additional constants for converting to bsaes.

.align 4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@   def u64s_to_u128(x, y):
@       return x | (y << 64)
@   def u128_to_u64s(w):
@       return w & ((1<<64)-1), w >> 64
@   def get_byte(w, i):
@       return (w >> (i*8)) & 0xff
@   def apply_table(table, b):
@       lo = b & 0xf
@       hi = b >> 4
@       return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@   def opt(b):
@       table = [
@           u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@           u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@       ]
@       return apply_table(table, b)
@   def rot_byte(b, n):
@       return 0xff & ((b << n) | (b >> (8-n)))
@   def skew(x):
@       return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@               rot_byte(x, 4))
@   table = [0, 0]
@   for i in range(16):
@       table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@       table[1] |= skew(opt(i<<4)) << (i*8)
@   print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@   print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1]))
Lk_opt_then_skew:
.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad 0x1f30062936192f00, 0xb49bad829db284ab

@ void GFp_vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl _GFp_vpaes_encrypt_key_to_bsaes
.private_extern _GFp_vpaes_encrypt_key_to_bsaes
#ifdef __thumb2__
.thumb_func _GFp_vpaes_encrypt_key_to_bsaes
#endif
.align 4
_GFp_vpaes_encrypt_key_to_bsaes:
    stmdb sp!, {r11, lr}

    @ See _vpaes_schedule_core for the key schedule logic. In particular,
    @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
    @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
    @ contain the transformations not in the bsaes representation. This
    @ function inverts those transforms.
    @
    @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
    @ representation, which does not match the other aes_nohw_*
    @ implementations. The ARM aes_nohw_* stores each 32-bit word
    @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
    @ cost of extra REV and VREV32 operations in little-endian ARM.

    vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
    adr r2, Lk_mc_forward @ Must be aligned to 8 mod 16.
    add r3, r2, #0x90 @ Lk_sr+0x10-Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)

    vld1.64 {q12}, [r2]
    vmov.i8 q10, #0x5b @ Lk_s63 from vpaes-x86_64
    adr r11, Lk_opt @ Must be aligned to 8 mod 16.
    vmov.i8 q11, #0x63 @ Lk_s63 without Lk_ipt applied

    @ vpaes stores one fewer round count than bsaes, but the number of keys
    @ is the same.
    ldr r2, [r1,#240]
    add r2, r2, #1
    str r2, [r0,#240]

    @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
    @ Invert this with .Lk_opt.
    vld1.64 {q0}, [r1]!
    bl _vpaes_schedule_transform
    vrev32.8 q0, q0
    vst1.64 {q0}, [r0]!

    @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
    @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
    @ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
Loop_enc_key_to_bsaes:
    vld1.64 {q0}, [r1]!

    @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
    @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
    @ We use r3 rather than r8 to avoid a callee-saved register.
    vld1.64 {q1}, [r3]
    vtbl.8 d4, {q0}, d2
    vtbl.8 d5, {q0}, d3
    add r3, r3, #16
    and r3, r3, #~(1<<6)
    vmov q0, q2

    @ Handle the last key differently.
    subs r2, r2, #1
    beq Loop_enc_key_to_bsaes_last

    @ Multiply by the circulant. This is its own inverse.
    vtbl.8 d2, {q0}, d24
    vtbl.8 d3, {q0}, d25
    vmov q0, q1
    vtbl.8 d4, {q1}, d24
    vtbl.8 d5, {q1}, d25
    veor q0, q0, q2
    vtbl.8 d2, {q2}, d24
    vtbl.8 d3, {q2}, d25
    veor q0, q0, q1

    @ XOR and finish.
    veor q0, q0, q10
    bl _vpaes_schedule_transform
    vrev32.8 q0, q0
    vst1.64 {q0}, [r0]!
    b Loop_enc_key_to_bsaes

Loop_enc_key_to_bsaes_last:
    @ The final key does not have a basis transform (note
    @ .Lschedule_mangle_last inverts the original transform). It only XORs
    @ 0x63 and applies ShiftRows. The latter was already inverted in the
    @ loop. Note that, because we act on the original representation, we use
    @ q11, not q10.
    veor q0, q0, q11
    vrev32.8 q0, q0
    vst1.64 {q0}, [r0]

    @ Wipe registers which contained key material.
    veor q0, q0, q0
    veor q1, q1, q1
    veor q2, q2, q2

    ldmia sp!, {r11, pc} @ return

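@ Presumed C prototype, following the convention used for
@ GFp_vpaes_encrypt_key_to_bsaes above (the fifth argument arrives on the
@ stack and is read into r8 below):
@ void GFp_vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
@                                     size_t blocks, const AES_KEY *key,
@                                     const uint8_t ivec[16]);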
.globl _GFp_vpaes_ctr32_encrypt_blocks
.private_extern _GFp_vpaes_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func _GFp_vpaes_ctr32_encrypt_blocks
#endif
.align 4
_GFp_vpaes_ctr32_encrypt_blocks:
    mov ip, sp
    stmdb sp!, {r7,r8,r9,r10,r11, lr}
    @ This function uses q4-q7 (d8-d15), which are callee-saved.
    vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

    cmp r2, #0
    @ r8 is passed on the stack.
    ldr r8, [ip]
    beq Lctr32_done

    @ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
    mov r9, r3
    mov r3, r2
    mov r2, r9

    @ Load the IV and counter portion.
    ldr r7, [r8, #12]
    vld1.8 {q7}, [r8]

    bl _vpaes_preheat
    rev r7, r7 @ The counter is big-endian.

Lctr32_loop:
    vmov q0, q7
    vld1.8 {q6}, [r0]! @ Load input ahead of time
    bl _vpaes_encrypt_core
    veor q0, q0, q6 @ XOR input and result
    vst1.8 {q0}, [r1]!
    subs r3, r3, #1
    @ Update the counter.
    add r7, r7, #1
    rev r9, r7
    vmov.32 d15[1], r9
    bne Lctr32_loop

Lctr32_done:
    vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
    ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return

#endif  // !OPENSSL_NO_ASM