/*
 * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/linkage.h>

.data
.align 32

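# Poly1305 works on 130-bit values split into five 26-bit limbs, one
# limb per 32-bit lane. ANMASK extracts a 26-bit limb; ORMASK sets bit
# 24 of the top limb, which is the 2^128 pad bit appended to every full
# 16-byte message block.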
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000

.text

#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define w0 0x14(%r8)
#define w1 0x18(%r8)
#define w2 0x1c(%r8)
#define w3 0x20(%r8)
#define w4 0x24(%r8)
#define y0 0x28(%r8)
#define y1 0x2c(%r8)
#define y2 0x30(%r8)
#define y3 0x34(%r8)
#define y4 0x38(%r8)
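# h[0..4] is the 130-bit accumulator and r[0..4] the clamped key, both
# as five 26-bit limbs; u, w and y are the precomputed powers r^2, r^3
# and r^4 in the derived-key buffer at %r8.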
#define m %rsi
#define hc0 %ymm0
#define hc1 %ymm1
#define hc2 %ymm2
#define hc3 %ymm3
#define hc4 %ymm4
#define hc0x %xmm0
#define hc1x %xmm1
#define hc2x %xmm2
#define hc3x %xmm3
#define hc4x %xmm4
#define t1 %ymm5
#define t2 %ymm6
#define t1x %xmm5
#define t2x %xmm6
#define ruwy0 %ymm7
#define ruwy1 %ymm8
#define ruwy2 %ymm9
#define ruwy3 %ymm10
#define ruwy4 %ymm11
#define ruwy0x %xmm7
#define ruwy1x %xmm8
#define ruwy2x %xmm9
#define ruwy3x %xmm10
#define ruwy4x %xmm11
#define svxz1 %ymm12
#define svxz2 %ymm13
#define svxz3 %ymm14
#define svxz4 %ymm15
#define d0 %r9
#define d1 %r10
#define d2 %r11
#define d3 %r12
#define d4 %r13
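# Each ruwy register holds limb i of r, r^2, r^3 and r^4, one power per
# 64-bit ymm lane; svxz1..4 hold the same limbs premultiplied by 5.
# d0..d4 collect the 64-bit column sums of the limb products.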

ENTRY(poly1305_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64-byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]

	# This four-block variant uses loop-unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
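	#
	# This follows from applying the one-block update four times:
	# h = ((((h + m1) * r + m2) * r + m3) * r + m4) * r
	#   = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r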

	vzeroupper
	push	%rbx
	push	%r12
	push	%r13

	# combine r0,u0,w0,y0
	vmovd	y0,ruwy0x
	vmovd	w0,t1x
	vpunpcklqdq	t1,ruwy0,ruwy0
	vmovd	u0,t1x
	vmovd	r0,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy0,ruwy0
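	# ruwy0 now holds [r0,u0,w0,y0] in lanes 3..0; the same interleave
	# pattern builds the remaining limb registers below.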

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vmovd	y1,ruwy1x
	vmovd	w1,t1x
	vpunpcklqdq	t1,ruwy1,ruwy1
	vmovd	u1,t1x
	vmovd	r1,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy1,ruwy1
	vpslld	$2,ruwy1,svxz1
	vpaddd	ruwy1,svxz1,svxz1
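	# The *5 variants fold the modular reduction into the multiply:
	# since 2^130 == 5 (mod 2^130 - 5), limb products that overflow
	# the 130-bit range wrap around with a factor of 5. The shift-add
	# pair computes x*5 as (x << 2) + x.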

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vmovd	y2,ruwy2x
	vmovd	w2,t1x
	vpunpcklqdq	t1,ruwy2,ruwy2
	vmovd	u2,t1x
	vmovd	r2,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy2,ruwy2
	vpslld	$2,ruwy2,svxz2
	vpaddd	ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vmovd	y3,ruwy3x
	vmovd	w3,t1x
	vpunpcklqdq	t1,ruwy3,ruwy3
	vmovd	u3,t1x
	vmovd	r3,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy3,ruwy3
	vpslld	$2,ruwy3,svxz3
	vpaddd	ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vmovd	y4,ruwy4x
	vmovd	w4,t1x
	vpunpcklqdq	t1,ruwy4,ruwy4
	vmovd	u4,t1x
	vmovd	r4,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy4,ruwy4
	vpslld	$2,ruwy4,svxz4
	vpaddd	ruwy4,svxz4,svxz4

.Ldoblock4:
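	# Each iteration consumes four 16-byte blocks: the i-th limbs of
	# all four blocks are gathered into one ymm register, so a single
	# vector multiply advances all four blocks at once. The running
	# accumulator h is added only into lane 0, the lane that gets
	# multiplied by r^4.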
	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vmovd	0x00(m),hc0x
	vmovd	0x10(m),t1x
	vpunpcklqdq	t1,hc0,hc0
	vmovd	0x20(m),t1x
	vmovd	0x30(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc0,hc0
	vpand	ANMASK(%rip),hc0,hc0
	vmovd	h0,t1x
	vpaddd	t1,hc0,hc0
	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vmovd	0x03(m),hc1x
	vmovd	0x13(m),t1x
	vpunpcklqdq	t1,hc1,hc1
	vmovd	0x23(m),t1x
	vmovd	0x33(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc1,hc1
	vpsrld	$2,hc1,hc1
	vpand	ANMASK(%rip),hc1,hc1
	vmovd	h1,t1x
	vpaddd	t1,hc1,hc1
	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vmovd	0x06(m),hc2x
	vmovd	0x16(m),t1x
	vpunpcklqdq	t1,hc2,hc2
	vmovd	0x26(m),t1x
	vmovd	0x36(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc2,hc2
	vpsrld	$4,hc2,hc2
	vpand	ANMASK(%rip),hc2,hc2
	vmovd	h2,t1x
	vpaddd	t1,hc2,hc2
	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vmovd	0x09(m),hc3x
	vmovd	0x19(m),t1x
	vpunpcklqdq	t1,hc3,hc3
	vmovd	0x29(m),t1x
	vmovd	0x39(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc3,hc3
	vpsrld	$6,hc3,hc3
	vpand	ANMASK(%rip),hc3,hc3
	vmovd	h3,t1x
	vpaddd	t1,hc3,hc3
	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vmovd	0x0c(m),hc4x
	vmovd	0x1c(m),t1x
	vpunpcklqdq	t1,hc4,hc4
	vmovd	0x2c(m),t1x
	vmovd	0x3c(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc4,hc4
	vpsrld	$8,hc4,hc4
	vpor	ORMASK(%rip),hc4,hc4
	vmovd	h4,t1x
	vpaddd	t1,hc4,hc4
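	# hc0..hc4 now hold the four blocks in 26-bit limb form, with the
	# 2^128 pad bit set in the top limb and h folded into lane 0.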

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq	hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq	hc1,svxz4,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq	hc2,svxz3,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq	hc3,svxz2,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq	hc4,svxz1,t2
	vpaddq	t2,t1,t1
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq	$0xee,t1,t2
	vpaddq	t2,t1,t1
	vpsrldq	$8,t1,t2
	vpaddq	t2,t1,t1
	vmovq	t1x,d0
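	# (vpermq $0xee copies the high 128-bit lane over the low one and
	# vpsrldq $8 shifts down the odd qword, so the two vpaddq steps
	# fold all four 64-bit lanes into one scalar column sum.)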

	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq	hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq	hc1,ruwy0,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq	hc2,svxz4,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq	hc3,svxz3,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq	hc4,svxz2,t2
	vpaddq	t2,t1,t1
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq	$0xee,t1,t2
	vpaddq	t2,t1,t1
	vpsrldq	$8,t1,t2
	vpaddq	t2,t1,t1
	vmovq	t1x,d1

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq	hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq	hc1,ruwy1,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq	hc2,ruwy0,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq	hc3,svxz4,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq	hc4,svxz3,t2
	vpaddq	t2,t1,t1
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq	$0xee,t1,t2
	vpaddq	t2,t1,t1
	vpsrldq	$8,t1,t2
	vpaddq	t2,t1,t1
	vmovq	t1x,d2

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq	hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq	hc1,ruwy2,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq	hc2,ruwy1,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq	hc3,ruwy0,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq	hc4,svxz4,t2
	vpaddq	t2,t1,t1
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq	$0xee,t1,t2
	vpaddq	t2,t1,t1
	vpsrldq	$8,t1,t2
	vpaddq	t2,t1,t1
	vmovq	t1x,d3

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq	hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq	hc1,ruwy3,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq	hc2,ruwy2,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq	hc3,ruwy1,t2
	vpaddq	t2,t1,t1
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq	hc4,ruwy0,t2
	vpaddq	t2,t1,t1
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq	$0xee,t1,t2
	vpaddq	t2,t1,t1
	vpsrldq	$8,t1,t2
	vpaddq	t2,t1,t1
	vmovq	t1x,d4

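	# d0..d4 now hold column sums wider than 26 bits. Propagate the
	# carries between limbs; the carry out of the top limb wraps
	# around multiplied by 5.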
	# d1 += d0 >> 26
	mov	d0,%rax
	shr	$26,%rax
	add	%rax,d1
	# h0 = d0 & 0x3ffffff (kept in %rbx until the final carry below)
	mov	d0,%rbx
	and	$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov	d1,%rax
	shr	$26,%rax
	add	%rax,d2
	# h1 = d1 & 0x3ffffff
	mov	d1,%rax
	and	$0x3ffffff,%eax
	mov	%eax,h1

	# d3 += d2 >> 26
	mov	d2,%rax
	shr	$26,%rax
	add	%rax,d3
	# h2 = d2 & 0x3ffffff
	mov	d2,%rax
	and	$0x3ffffff,%eax
	mov	%eax,h2

	# d4 += d3 >> 26
	mov	d3,%rax
	shr	$26,%rax
	add	%rax,d4
	# h3 = d3 & 0x3ffffff
	mov	d3,%rax
	and	$0x3ffffff,%eax
	mov	%eax,h3

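	# the lea below computes %eax + 4*%eax = 5*%eax in one instruction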
	# h0 += (d4 >> 26) * 5
	mov	d4,%rax
	shr	$26,%rax
	lea	(%eax,%eax,4),%eax
	add	%eax,%ebx
	# h4 = d4 & 0x3ffffff
	mov	d4,%rax
	and	$0x3ffffff,%eax
	mov	%eax,h4

	# h1 += h0 >> 26
	mov	%ebx,%eax
	shr	$26,%eax
	add	%eax,h1
	# h0 = h0 & 0x3ffffff
	andl	$0x3ffffff,%ebx
	mov	%ebx,h0

	add	$0x40,m
	dec	%rcx
	jnz	.Ldoblock4

	vzeroupper
	pop	%r13
	pop	%r12
	pop	%rbx
	ret
ENDPROC(poly1305_4block_avx2)