/* SHA-256 and SHA-512 implementation based on code by Oliver Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

38#include "avb_sha.h"
39
/* Bit-twiddling primitives from FIPS 180-2 section 4.1.3.
 *
 * Every macro parameter and every expansion is fully parenthesized
 * (CERT PRE01-C/PRE02-C): the previous definitions expanded e.g.
 * SHFR(a + b, 4) to (a + b >> 4), which binds as a + (b >> 4).  All
 * current call sites pass simple arguments, so behavior is unchanged
 * for them.  `sizeof(x) << 3` is the operand width in bits, so ROTR/ROTL
 * rotate within the full width of x's type. */
#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

/* SHA-512 Sigma/sigma functions (FIPS 180-2 section 4.1.3):
 * F1 = Sigma0, F2 = Sigma1 (used in the round), F3 = sigma0,
 * F4 = sigma1 (used in the message schedule). */
#define SHA512_F1(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
#define SHA512_F2(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
#define SHA512_F3(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ SHFR((x), 7))
#define SHA512_F4(x) (ROTR((x), 19) ^ ROTR((x), 61) ^ SHFR((x), 6))

/* Store a 32-bit value big-endian into str[0..3].  Wrapped in
 * do { } while (0) so the macros act as a single statement (safe after
 * an unbraced if/else), instead of a bare { } block. */
#define UNPACK32(x, str)                 \
  do {                                   \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  } while (0)

/* Store a 64-bit value big-endian into str[0..7]. */
#define UNPACK64(x, str)                             \
  do {                                               \
    *((str) + 7) = (uint8_t)((x));                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);    \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16);   \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24);   \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32);   \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40);   \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48);   \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56);   \
  } while (0)

/* Load a big-endian 64-bit value from str[0..7] into *x. */
#define PACK64(str, x)                                                     \
  do {                                                                     \
    *(x) =                                                                 \
        ((uint64_t) * ((str) + 7)) | ((uint64_t) * ((str) + 6) << 8) |     \
        ((uint64_t) * ((str) + 5) << 16) | ((uint64_t) * ((str) + 4) << 24) | \
        ((uint64_t) * ((str) + 3) << 32) | ((uint64_t) * ((str) + 2) << 40) | \
        ((uint64_t) * ((str) + 1) << 48) | ((uint64_t) * ((str) + 0) << 56);  \
  } while (0)

/* Macros used for loop unrolling.  They rely on locals named w, wv, t1
 * and t2 being in scope at the expansion site (see SHA512_transform). */

/* One step of the SHA-512 message schedule: w[i] from w[i-2..i-16]. */
#define SHA512_SCR(i)                                               \
  do {                                                              \
    w[(i)] = SHA512_F4(w[(i) - 2]) + w[(i) - 7] +                   \
             SHA512_F3(w[(i) - 15]) + w[(i) - 16];                  \
  } while (0)

/* One SHA-512 round with the working variables permuted by index. */
#define SHA512_EXP(a, b, c, d, e, f, g, h, j)                           \
  do {                                                                  \
    t1 = wv[(h)] + SHA512_F2(wv[(e)]) + CH(wv[(e)], wv[(f)], wv[(g)]) + \
         sha512_k[(j)] + w[(j)];                                        \
    t2 = SHA512_F1(wv[(a)]) + MAJ(wv[(a)], wv[(b)], wv[(c)]);           \
    wv[(d)] += t1;                                                      \
    wv[(h)] = t1 + t2;                                                  \
  } while (0)
93
/* Initial hash value H(0) for SHA-512 (FIPS 180-2 section 5.3.3). */
static const uint64_t sha512_h0[8] = {
    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
    0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
    0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL,
};

/* SHA-512 round constants K0..K79 (FIPS 180-2 section 4.2.3),
 * four per line. */
static const uint64_t sha512_k[80] = {
    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
};
127
/* SHA-512 implementation */

130void avb_sha512_init(AvbSHA512Ctx* ctx) {
131#ifdef UNROLL_LOOPS_SHA512
132 ctx->h[0] = sha512_h0[0];
133 ctx->h[1] = sha512_h0[1];
134 ctx->h[2] = sha512_h0[2];
135 ctx->h[3] = sha512_h0[3];
136 ctx->h[4] = sha512_h0[4];
137 ctx->h[5] = sha512_h0[5];
138 ctx->h[6] = sha512_h0[6];
139 ctx->h[7] = sha512_h0[7];
140#else
141 int i;
142
143 for (i = 0; i < 8; i++) ctx->h[i] = sha512_h0[i];
144#endif /* UNROLL_LOOPS_SHA512 */
145
146 ctx->len = 0;
147 ctx->tot_len = 0;
148}
149
150static void SHA512_transform(AvbSHA512Ctx* ctx, const uint8_t* message,
151 unsigned int block_nb) {
152 uint64_t w[80];
153 uint64_t wv[8];
154 uint64_t t1, t2;
155 const uint8_t* sub_block;
156 int i, j;
157
158 for (i = 0; i < (int)block_nb; i++) {
159 sub_block = message + (i << 7);
160
161#ifdef UNROLL_LOOPS_SHA512
162 PACK64(&sub_block[0], &w[0]);
163 PACK64(&sub_block[8], &w[1]);
164 PACK64(&sub_block[16], &w[2]);
165 PACK64(&sub_block[24], &w[3]);
166 PACK64(&sub_block[32], &w[4]);
167 PACK64(&sub_block[40], &w[5]);
168 PACK64(&sub_block[48], &w[6]);
169 PACK64(&sub_block[56], &w[7]);
170 PACK64(&sub_block[64], &w[8]);
171 PACK64(&sub_block[72], &w[9]);
172 PACK64(&sub_block[80], &w[10]);
173 PACK64(&sub_block[88], &w[11]);
174 PACK64(&sub_block[96], &w[12]);
175 PACK64(&sub_block[104], &w[13]);
176 PACK64(&sub_block[112], &w[14]);
177 PACK64(&sub_block[120], &w[15]);
178
179 SHA512_SCR(16);
180 SHA512_SCR(17);
181 SHA512_SCR(18);
182 SHA512_SCR(19);
183 SHA512_SCR(20);
184 SHA512_SCR(21);
185 SHA512_SCR(22);
186 SHA512_SCR(23);
187 SHA512_SCR(24);
188 SHA512_SCR(25);
189 SHA512_SCR(26);
190 SHA512_SCR(27);
191 SHA512_SCR(28);
192 SHA512_SCR(29);
193 SHA512_SCR(30);
194 SHA512_SCR(31);
195 SHA512_SCR(32);
196 SHA512_SCR(33);
197 SHA512_SCR(34);
198 SHA512_SCR(35);
199 SHA512_SCR(36);
200 SHA512_SCR(37);
201 SHA512_SCR(38);
202 SHA512_SCR(39);
203 SHA512_SCR(40);
204 SHA512_SCR(41);
205 SHA512_SCR(42);
206 SHA512_SCR(43);
207 SHA512_SCR(44);
208 SHA512_SCR(45);
209 SHA512_SCR(46);
210 SHA512_SCR(47);
211 SHA512_SCR(48);
212 SHA512_SCR(49);
213 SHA512_SCR(50);
214 SHA512_SCR(51);
215 SHA512_SCR(52);
216 SHA512_SCR(53);
217 SHA512_SCR(54);
218 SHA512_SCR(55);
219 SHA512_SCR(56);
220 SHA512_SCR(57);
221 SHA512_SCR(58);
222 SHA512_SCR(59);
223 SHA512_SCR(60);
224 SHA512_SCR(61);
225 SHA512_SCR(62);
226 SHA512_SCR(63);
227 SHA512_SCR(64);
228 SHA512_SCR(65);
229 SHA512_SCR(66);
230 SHA512_SCR(67);
231 SHA512_SCR(68);
232 SHA512_SCR(69);
233 SHA512_SCR(70);
234 SHA512_SCR(71);
235 SHA512_SCR(72);
236 SHA512_SCR(73);
237 SHA512_SCR(74);
238 SHA512_SCR(75);
239 SHA512_SCR(76);
240 SHA512_SCR(77);
241 SHA512_SCR(78);
242 SHA512_SCR(79);
243
244 wv[0] = ctx->h[0];
245 wv[1] = ctx->h[1];
246 wv[2] = ctx->h[2];
247 wv[3] = ctx->h[3];
248 wv[4] = ctx->h[4];
249 wv[5] = ctx->h[5];
250 wv[6] = ctx->h[6];
251 wv[7] = ctx->h[7];
252
253 j = 0;
254
255 do {
256 SHA512_EXP(0, 1, 2, 3, 4, 5, 6, 7, j);
257 j++;
258 SHA512_EXP(7, 0, 1, 2, 3, 4, 5, 6, j);
259 j++;
260 SHA512_EXP(6, 7, 0, 1, 2, 3, 4, 5, j);
261 j++;
262 SHA512_EXP(5, 6, 7, 0, 1, 2, 3, 4, j);
263 j++;
264 SHA512_EXP(4, 5, 6, 7, 0, 1, 2, 3, j);
265 j++;
266 SHA512_EXP(3, 4, 5, 6, 7, 0, 1, 2, j);
267 j++;
268 SHA512_EXP(2, 3, 4, 5, 6, 7, 0, 1, j);
269 j++;
270 SHA512_EXP(1, 2, 3, 4, 5, 6, 7, 0, j);
271 j++;
272 } while (j < 80);
273
274 ctx->h[0] += wv[0];
275 ctx->h[1] += wv[1];
276 ctx->h[2] += wv[2];
277 ctx->h[3] += wv[3];
278 ctx->h[4] += wv[4];
279 ctx->h[5] += wv[5];
280 ctx->h[6] += wv[6];
281 ctx->h[7] += wv[7];
282#else
283 for (j = 0; j < 16; j++) {
284 PACK64(&sub_block[j << 3], &w[j]);
285 }
286
287 for (j = 16; j < 80; j++) {
288 SHA512_SCR(j);
289 }
290
291 for (j = 0; j < 8; j++) {
292 wv[j] = ctx->h[j];
293 }
294
295 for (j = 0; j < 80; j++) {
296 t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha512_k[j] +
297 w[j];
298 t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
299 wv[7] = wv[6];
300 wv[6] = wv[5];
301 wv[5] = wv[4];
302 wv[4] = wv[3] + t1;
303 wv[3] = wv[2];
304 wv[2] = wv[1];
305 wv[1] = wv[0];
306 wv[0] = t1 + t2;
307 }
308
309 for (j = 0; j < 8; j++) ctx->h[j] += wv[j];
310#endif /* UNROLL_LOOPS_SHA512 */
311 }
312}
313
314void avb_sha512_update(AvbSHA512Ctx* ctx, const uint8_t* data, uint32_t len) {
315 unsigned int block_nb;
316 unsigned int new_len, rem_len, tmp_len;
317 const uint8_t* shifted_data;
318
319 tmp_len = AVB_SHA512_BLOCK_SIZE - ctx->len;
320 rem_len = len < tmp_len ? len : tmp_len;
321
322 avb_memcpy(&ctx->block[ctx->len], data, rem_len);
323
324 if (ctx->len + len < AVB_SHA512_BLOCK_SIZE) {
325 ctx->len += len;
326 return;
327 }
328
329 new_len = len - rem_len;
330 block_nb = new_len / AVB_SHA512_BLOCK_SIZE;
331
332 shifted_data = data + rem_len;
333
334 SHA512_transform(ctx, ctx->block, 1);
335 SHA512_transform(ctx, shifted_data, block_nb);
336
337 rem_len = new_len % AVB_SHA512_BLOCK_SIZE;
338
339 avb_memcpy(ctx->block, &shifted_data[block_nb << 7], rem_len);
340
341 ctx->len = rem_len;
342 ctx->tot_len += (block_nb + 1) << 7;
343}
344
345uint8_t* avb_sha512_final(AvbSHA512Ctx* ctx) {
346 unsigned int block_nb;
347 unsigned int pm_len;
348 unsigned int len_b;
349
350#ifndef UNROLL_LOOPS_SHA512
351 int i;
352#endif
353
354 block_nb =
355 1 + ((AVB_SHA512_BLOCK_SIZE - 17) < (ctx->len % AVB_SHA512_BLOCK_SIZE));
356
357 len_b = (ctx->tot_len + ctx->len) << 3;
358 pm_len = block_nb << 7;
359
360 avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
361 ctx->block[ctx->len] = 0x80;
362 UNPACK32(len_b, ctx->block + pm_len - 4);
363
364 SHA512_transform(ctx, ctx->block, block_nb);
365
366#ifdef UNROLL_LOOPS_SHA512
367 UNPACK64(ctx->h[0], &ctx->buf[0]);
368 UNPACK64(ctx->h[1], &ctx->buf[8]);
369 UNPACK64(ctx->h[2], &ctx->buf[16]);
370 UNPACK64(ctx->h[3], &ctx->buf[24]);
371 UNPACK64(ctx->h[4], &ctx->buf[32]);
372 UNPACK64(ctx->h[5], &ctx->buf[40]);
373 UNPACK64(ctx->h[6], &ctx->buf[48]);
374 UNPACK64(ctx->h[7], &ctx->buf[56]);
375#else
376 for (i = 0; i < 8; i++) UNPACK64(ctx->h[i], &ctx->buf[i << 3]);
377#endif /* UNROLL_LOOPS_SHA512 */
378
379 return ctx->buf;
380}