/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/sha.h>

#include "cesa.h"

static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	if (creq->cache)
		return 0;

	return mv_cesa_ahash_std_alloc_cache(creq, flags);
}

static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}

static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
{
	if (!creq->cache)
		return;

	mv_cesa_ahash_std_free_cache(creq);

	creq->cache = NULL;
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);
}

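/*
 * Standard SHA1/SHA256 message padding: a 0x80 byte, zeroes up to
 * 56 mod 64, then the total message length in bits appended as a
 * 64-bit big-endian value, so that the padded stream ends on a
 * 64-byte block boundary.
 */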
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	__be64 bits = cpu_to_be64(creq->len << 3);
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);
	memcpy(buf + padlen, &bits, sizeof(bits));

	return padlen + 8;
}

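/*
 * Program one step of a standard (CPU-driven) hash request: copy any
 * cached bytes plus as much new source data as fits into the engine
 * SRAM, pick the fragment mode (first/mid/last/not fragmented), append
 * the padding directly in SRAM when the trailer fits, and kick the
 * engine. Bytes that cannot be processed yet are kept in creq->cache.
 */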
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache,
		       creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy(creq->cache,
				       engine->sram +
				       CESA_SA_DATA_SRAM_OFFSET + len,
				       new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

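/*
 * A standard request completes only when all source bytes that are
 * not kept in the cache have been fed to the engine; until then,
 * report -EINPROGRESS so another step is scheduled.
 */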
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);

	mv_cesa_ahash_std_step(ahashreq);
}

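/*
 * Per-step completion handler: read the intermediate digest back from
 * the engine's IVDIG registers, stash any trailing source bytes in the
 * cache for the next operation, and, on the last request, convert the
 * state words to big-endian and copy the final digest to the caller's
 * result buffer.
 */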
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	ret = mv_cesa_ahash_std_process(ahashreq, status);
	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_be32(creq->state[i]);

		memcpy(ahashreq->result, creq->state, digsize);
	}

	return ret;
}

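/*
 * Bind the request to the engine it has been queued on and load the
 * current hash state into the IVDIG registers before the first step.
 */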
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel(creq->state[i],
		       engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

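/*
 * Buffer sub-block-sized updates: when the data accumulated so far
 * does not fill a complete 64-byte block and this is not the final
 * request, copy it into the cache and report the request as fully
 * cached so that no hardware operation is queued.
 */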
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->req.base.type = CESA_STD_REQ;
	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);

	return mv_cesa_ahash_cache_req(req, cached);
}

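/*
 * .update/.final/.finup entry points: account for the new data, let
 * the cache logic decide whether hardware work is actually needed,
 * and otherwise queue the request on a CESA engine.
 */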
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	return mv_cesa_queue_req(&req->base);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	return mv_cesa_queue_req(&req->base);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	return mv_cesa_queue_req(&req->base);
}

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

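/*
 * Export/import the software-visible request state as a struct
 * sha1_state: total byte count, intermediate digest, and the partially
 * filled block buffered in the cache.
 */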
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);

	out_state->count = creq->len;
	memcpy(out_state->state, creq->state, digsize);
	memset(out_state->buffer, 0, sizeof(out_state->buffer));
	if (creq->cache)
		memcpy(out_state->buffer, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int cache_ptr;
	int ret;

	creq->len = in_state->count;
	memcpy(creq->state, in_state->state, digsize);
	creq->cache_ptr = 0;

	cache_ptr = creq->len % SHA1_BLOCK_SIZE;
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, in_state->buffer, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

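/*
 * Hash a single xored pad block with the underlying ahash and export
 * the resulting intermediate state: this is the precomputed inner or
 * outer HMAC state that is later loaded into the engine.
 */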
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.error;
	}
	if (ret)
		return ret;

	return crypto_ahash_export(req, state);
}

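/*
 * Build the HMAC ipad/opad blocks as described in RFC 2104: keys
 * longer than a block are first replaced by their digest, then the
 * key is zero-padded to the block size and xored with 0x36 (ipad)
 * and 0x5c (opad).
 */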
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

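/*
 * Precompute the HMAC inner and outer intermediate states with a
 * separately allocated ahash transform: derive ipad/opad from the
 * key, then hash each pad block once and export the resulting states.
 */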
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

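/*
 * Convert the precomputed inner/outer SHA1 states into the engine's
 * IV layout: the inner state fills iv[0..4] and the outer state starts
 * at iv[8], each word byte-swapped from its exported big-endian form.
 */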
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};