blob: 528da260a7cca67f951113331c42033d1e837e10 [file] [log] [blame]
Boris BREZILLONf63601f2015-06-18 15:46:20 +02001/*
2 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
3 *
4 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
5 * Author: Arnaud Ebalard <arno@natisbad.org>
6 *
7 * This work is based on an initial version written by
8 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 */
14
15#include <crypto/sha.h>
16
17#include "cesa.h"
18
/* Iterator state used while building the TDMA chain for an ahash request. */
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;	/* overall request progress */
	struct mv_cesa_sg_dma_iter src;	/* position in the source scatterlist */
};
23
/*
 * Initialize @iter to walk @req's source data.  For non-final requests
 * only whole hash blocks can be processed, so the length is rounded
 * down to a block boundary (counting the already-cached bytes).
 */
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes;

	if (!creq->last_req)
		/* keep the trailing partial block for a later request */
		len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	/* the first op starts after the bytes already held in the cache */
	iter->src.op_offset = creq->cache_ptr;
}
38
/*
 * Advance the iterator to the next operation.  The cache offset only
 * applies to the first op, so it is cleared here.
 */
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
46
47static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
48 gfp_t flags)
49{
50 struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;
51
52 creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
53 &dreq->cache_dma);
54 if (!creq->cache)
55 return -ENOMEM;
56
57 return 0;
58}
59
Boris BREZILLONf63601f2015-06-18 15:46:20 +020060static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
61 gfp_t flags)
62{
63 creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
64 if (!creq->cache)
65 return -ENOMEM;
66
67 return 0;
68}
69
70static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
71{
72 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
73 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
74 GFP_KERNEL : GFP_ATOMIC;
Boris BREZILLONdb509a42015-06-18 15:46:21 +020075 int ret;
Boris BREZILLONf63601f2015-06-18 15:46:20 +020076
77 if (creq->cache)
78 return 0;
79
Boris BREZILLONdb509a42015-06-18 15:46:21 +020080 if (creq->req.base.type == CESA_DMA_REQ)
81 ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
82 else
83 ret = mv_cesa_ahash_std_alloc_cache(creq, flags);
84
85 return ret;
86}
87
/* Return the DMA-pool-backed cache buffer to the pool. */
static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
		      creq->req.dma.cache_dma);
}
93
/* Free the kzalloc'ed cache used by standard (non-DMA) requests. */
static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}
98
99static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
100{
101 if (!creq->cache)
102 return;
103
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200104 if (creq->req.base.type == CESA_DMA_REQ)
105 mv_cesa_ahash_dma_free_cache(creq);
106 else
107 mv_cesa_ahash_std_free_cache(creq);
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200108
109 creq->cache = NULL;
110}
111
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200112static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
113 gfp_t flags)
114{
115 if (req->padding)
116 return 0;
117
118 req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
119 &req->padding_dma);
120 if (!req->padding)
121 return -ENOMEM;
122
123 return 0;
124}
125
/* Release the padding buffer, if one was allocated. */
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
135
/* DMA-mode teardown that only runs once the final request completes. */
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
142
/* Undo the scatterlist DMA mapping and free the TDMA descriptor chain. */
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}
150
/* Per-request cleanup; only DMA-backed requests need any work here. */
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}
158
/* Final cleanup once the whole hash transaction is finished. */
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
168
169static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
170{
171 unsigned int index, padlen;
172
173 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
174 padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
175
176 return padlen;
177}
178
179static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
180{
181 __be64 bits = cpu_to_be64(creq->len << 3);
182 unsigned int index, padlen;
183
184 buf[0] = 0x80;
185 /* Pad out to 56 mod 64 */
186 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
187 padlen = mv_cesa_ahash_pad_len(creq);
188 memset(buf + 1, 0, padlen - 1);
189 memcpy(buf + padlen, &bits, sizeof(bits));
190
191 return padlen + 8;
192}
193
/*
 * Feed the next chunk of data into engine SRAM for a standard
 * (CPU-driven) request, pick the right fragment mode, and kick the
 * accelerator.
 */
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	/* Replay the bytes buffered from the previous request first. */
	if (creq->cache_ptr)
		memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache,
		       creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		/* Only whole blocks may be hashed before the final
		 * request; the remainder goes back into the cache. */
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	/* Copy fresh source data right after the cached bytes. */
	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	/* All data consumed and the total length fits the descriptor
	 * limit: the engine can finalize (pad) by itself. */
	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			/* Hardware auto-padding cannot be used: append
			 * the software-computed padding instead. */
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				/* Padding does not fit in this round:
				 * process whole blocks now and cache the
				 * tail for the next step. */
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy(creq->cache,
				       engine->sram +
				       CESA_SA_DATA_SRAM_OFFSET + len,
				       new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			/* Another pass is required to finish padding. */
			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy(engine->sram, op, sizeof(*op));

	/* Once a first fragment was issued, later ops are mid frags. */
	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	/* Unmask the completion interrupt and start the accelerator. */
	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
280
281static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
282{
283 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
284 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
285
286 if (sreq->offset < (req->nbytes - creq->cache_ptr))
287 return -EINPROGRESS;
288
289 return 0;
290}
291
/* Point the TDMA chain at the engine this request was assigned to. */
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}
299
/* Reset progress and install the operation template in engine SRAM. */
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}
310
311static void mv_cesa_ahash_step(struct crypto_async_request *req)
312{
313 struct ahash_request *ahashreq = ahash_request_cast(req);
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200314 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200315
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200316 if (creq->req.base.type == CESA_DMA_REQ)
317 mv_cesa_dma_step(&creq->req.dma.base);
318 else
319 mv_cesa_ahash_std_step(ahashreq);
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200320}
321
/*
 * Queue .process callback: collect the hash state once the hardware is
 * done, stash unprocessed trailing bytes back in the cache, and emit
 * the digest on the final request.
 */
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	/* Save the intermediate state out of the IVDIG registers. */
	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl(engine->regs + CESA_IVDIG(i));

	/* Refill the cache with the bytes left unprocessed. */
	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/* Convert the state words to big-endian before copying
		 * out the final digest. */
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_be32(creq->state[i]);

		memcpy(ahashreq->result, creq->state, digsize);
	}

	return ret;
}
357
/*
 * Queue .prepare callback: bind the request to @engine and load the
 * current hash state into the engine's IVDIG registers.
 */
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	/* Restore the intermediate state saved by the last process(). */
	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel(creq->state[i],
		       engine->regs + CESA_IVDIG(i));
}
378
/* Queue .cleanup callback: per-request and, when final, last cleanup. */
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}
389
/* Callbacks the CESA request queue uses to drive ahash requests. */
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};
396
/*
 * Common .init path: reset the per-request context and finish setting
 * up the operation template (MAC-only operation, first fragment, zero
 * lengths), then store the template in the request context.
 */
static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;

	return 0;
}
415
/* tfm init: install the request ops and size the per-request context. */
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
426
/*
 * Buffer request data in the software cache when possible.  *cached is
 * set to true when all of @req's bytes fit in the cache, meaning no
 * hardware operation is needed yet.
 */
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	/* A partial block will remain: make sure the cache exists. */
	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	/* Less than one full block pending: just accumulate it. */
	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}
454
/*
 * Emit TDMA descriptors transferring the cached bytes to SRAM.  If the
 * cache is the only data to process (no further source data), also
 * emit the operation descriptor and the launch descriptor.
 * Returns the op context when one was added, NULL when there was
 * nothing to do, or an ERR_PTR() on failure.
 */
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_op_ctx *op = NULL;
	int ret;

	if (!creq->cache_ptr)
		return NULL;

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->cache_dma,
					    creq->cache_ptr,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	if (!dma_iter->base.op_len) {
		op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr);

		/* Add dummy desc to launch crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(chain, flags);
		if (ret)
			return ERR_PTR(ret);
	}

	return op;
}
492
/*
 * Emit the op descriptor, the input data transfers and the launch
 * descriptor for one engine-sized chunk of source data.
 * Returns the op context or an ERR_PTR() on failure.
 */
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len);

	/* After the first fragment, subsequent ops are mid fragments. */
	if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) ==
	    CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	/* Add input transfers */
	ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base,
					   &dma_iter->src, flags);
	if (ret)
		return ERR_PTR(ret);

	/* Add dummy desc to launch crypto operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	return op;
}
527
/*
 * Finish the TDMA chain for the final request: either let the engine
 * auto-pad (when the total length fits in the descriptor limit) or
 * append the software-computed padding, splitting it over an extra
 * operation when it does not fit in the remaining SRAM payload space.
 * Returns the last op context or an ERR_PTR() on failure.
 */
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   struct mv_cesa_op_ctx *op,
			   gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	int ret;

	if (!creq->last_req)
		return op;

	/* The engine can pad by itself: just fix the fragment mode. */
	if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		u32 frag = CESA_SA_DESC_CFG_NOT_FRAG;

		if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) !=
		    CESA_SA_DESC_CFG_FIRST_FRAG)
			frag = CESA_SA_DESC_CFG_LAST_FRAG;

		mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	if (op) {
		/* Fit as much padding as possible into the last op. */
		len = min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len,
			  trailerlen);
		if (len) {
			ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						dma_iter->base.op_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
			if (ret)
				return ERR_PTR(ret);

			mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
					      CESA_SA_DESC_CFG_FRAG_MSK);
			mv_cesa_set_mac_op_frag_len(op,
					dma_iter->base.op_len + len);
			padoff += len;
		}
	}

	/* All padding consumed by the previous op: we are done. */
	if (padoff >= trailerlen)
		return op;

	if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) !=
	    CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	/* Emit one more op to hash the remaining padding bytes. */
	op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff);

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	/* Add dummy desc to launch crypto operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	return op;
}
613
/*
 * Build the whole TDMA descriptor chain for a DMA-backed request: map
 * the source scatterlist, replay the cached bytes, emit one operation
 * per SRAM-sized chunk, then append the final padding if needed.
 */
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_tdma_chain chain;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			/* dma_map_sg() returns 0 entries on failure */
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/* Replay the cached bytes before any new source data. */
	op = mv_cesa_ahash_dma_add_cache(&chain, &iter,
					 creq, flags);
	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/* One operation per engine-sized chunk of source data. */
	do {
		if (!iter.base.op_len)
			break;

		op = mv_cesa_ahash_dma_add_data(&chain, &iter,
						creq, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
	} while (mv_cesa_ahash_req_iter_next_op(&iter));

	/* Handle padding when this is the final request. */
	op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags);
	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	/* Bytes not covered by the chain stay in the cache. */
	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
692
/*
 * Prepare a request for execution: pick DMA vs standard mode, buffer
 * small updates in the cache, and build the TDMA chain when DMA mode
 * is used.  *cached is true when nothing needs to be submitted.
 */
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	/* Everything fits in the cache: nothing to submit yet. */
	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}
717
718static int mv_cesa_ahash_update(struct ahash_request *req)
719{
720 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
721 bool cached = false;
722 int ret;
723
724 creq->len += req->nbytes;
725 ret = mv_cesa_ahash_req_init(req, &cached);
726 if (ret)
727 return ret;
728
729 if (cached)
730 return 0;
731
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200732 ret = mv_cesa_queue_req(&req->base);
733 if (ret && ret != -EINPROGRESS) {
734 mv_cesa_ahash_cleanup(req);
735 return ret;
736 }
737
738 return ret;
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200739}
740
741static int mv_cesa_ahash_final(struct ahash_request *req)
742{
743 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
744 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
745 bool cached = false;
746 int ret;
747
748 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
749 creq->last_req = true;
750 req->nbytes = 0;
751
752 ret = mv_cesa_ahash_req_init(req, &cached);
753 if (ret)
754 return ret;
755
756 if (cached)
757 return 0;
758
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200759 ret = mv_cesa_queue_req(&req->base);
760 if (ret && ret != -EINPROGRESS)
761 mv_cesa_ahash_cleanup(req);
762
763 return ret;
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200764}
765
766static int mv_cesa_ahash_finup(struct ahash_request *req)
767{
768 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
769 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
770 bool cached = false;
771 int ret;
772
773 creq->len += req->nbytes;
774 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
775 creq->last_req = true;
776
777 ret = mv_cesa_ahash_req_init(req, &cached);
778 if (ret)
779 return ret;
780
781 if (cached)
782 return 0;
783
Boris BREZILLONdb509a42015-06-18 15:46:21 +0200784 ret = mv_cesa_queue_req(&req->base);
785 if (ret && ret != -EINPROGRESS)
786 mv_cesa_ahash_cleanup(req);
787
788 return ret;
Boris BREZILLONf63601f2015-06-18 15:46:20 +0200789}
790
791static int mv_cesa_sha1_init(struct ahash_request *req)
792{
793 struct mv_cesa_op_ctx tmpl;
794
795 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
796
797 mv_cesa_ahash_init(req, &tmpl);
798
799 return 0;
800}
801
/*
 * Serialize the current hash state into a struct sha1_state so it can
 * later be resumed by mv_cesa_sha1_import().
 */
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);

	out_state->count = creq->len;
	memcpy(out_state->state, creq->state, digsize);
	memset(out_state->buffer, 0, sizeof(out_state->buffer));
	/* Export any partial-block bytes still held in the cache. */
	if (creq->cache)
		memcpy(out_state->buffer, creq->cache, creq->cache_ptr);

	return 0;
}
817
/*
 * Restore a hash state previously saved by mv_cesa_sha1_export(),
 * including any partial-block bytes.
 */
static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int cache_ptr;
	int ret;

	creq->len = in_state->count;
	memcpy(creq->state, in_state->state, digsize);
	creq->cache_ptr = 0;

	/* Leftover bytes of the last partial block, if any. */
	cache_ptr = creq->len % SHA1_BLOCK_SIZE;
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, in_state->buffer, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
844
/* One-shot sha1: init followed by finup. */
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret = mv_cesa_sha1_init(req);

	return ret ? ret : mv_cesa_ahash_finup(req);
}
855
856struct ahash_alg mv_sha1_alg = {
857 .init = mv_cesa_sha1_init,
858 .update = mv_cesa_ahash_update,
859 .final = mv_cesa_ahash_final,
860 .finup = mv_cesa_ahash_finup,
861 .digest = mv_cesa_sha1_digest,
862 .export = mv_cesa_sha1_export,
863 .import = mv_cesa_sha1_import,
864 .halg = {
865 .digestsize = SHA1_DIGEST_SIZE,
866 .base = {
867 .cra_name = "sha1",
868 .cra_driver_name = "mv-sha1",
869 .cra_priority = 300,
870 .cra_flags = CRYPTO_ALG_ASYNC |
871 CRYPTO_ALG_KERN_DRIVER_ONLY,
872 .cra_blocksize = SHA1_BLOCK_SIZE,
873 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
874 .cra_init = mv_cesa_ahash_cra_init,
875 .cra_module = THIS_MODULE,
876 }
877 }
878};
879
/* Completion tracking for the synchronous helpers used by setkey. */
struct mv_cesa_ahash_result {
	struct completion completion;	/* signalled by the async callback */
	int error;			/* final status of the request */
};
884
885static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
886 int error)
887{
888 struct mv_cesa_ahash_result *result = req->data;
889
890 if (error == -EINPROGRESS)
891 return;
892
893 result->error = error;
894 complete(&result->completion);
895}
896
897static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
898 void *state, unsigned int blocksize)
899{
900 struct mv_cesa_ahash_result result;
901 struct scatterlist sg;
902 int ret;
903
904 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
905 mv_cesa_hmac_ahash_complete, &result);
906 sg_init_one(&sg, pad, blocksize);
907 ahash_request_set_crypt(req, &sg, pad, blocksize);
908 init_completion(&result.completion);
909
910 ret = crypto_ahash_init(req);
911 if (ret)
912 return ret;
913
914 ret = crypto_ahash_update(req);
915 if (ret && ret != -EINPROGRESS)
916 return ret;
917
918 wait_for_completion_interruptible(&result.completion);
919 if (result.error)
920 return result.error;
921
922 ret = crypto_ahash_export(req, state);
923 if (ret)
924 return ret;
925
926 return 0;
927}
928
929static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
930 const u8 *key, unsigned int keylen,
931 u8 *ipad, u8 *opad,
932 unsigned int blocksize)
933{
934 struct mv_cesa_ahash_result result;
935 struct scatterlist sg;
936 int ret;
937 int i;
938
939 if (keylen <= blocksize) {
940 memcpy(ipad, key, keylen);
941 } else {
942 u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
943
944 if (!keydup)
945 return -ENOMEM;
946
947 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
948 mv_cesa_hmac_ahash_complete,
949 &result);
950 sg_init_one(&sg, keydup, keylen);
951 ahash_request_set_crypt(req, &sg, ipad, keylen);
952 init_completion(&result.completion);
953
954 ret = crypto_ahash_digest(req);
955 if (ret == -EINPROGRESS) {
956 wait_for_completion_interruptible(&result.completion);
957 ret = result.error;
958 }
959
960 /* Set the memory region to 0 to avoid any leak. */
961 memset(keydup, 0, keylen);
962 kfree(keydup);
963
964 if (ret)
965 return ret;
966
967 keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
968 }
969
970 memset(ipad + keylen, 0, blocksize - keylen);
971 memcpy(opad, ipad, blocksize);
972
973 for (i = 0; i < blocksize; i++) {
974 ipad[i] ^= 0x36;
975 opad[i] ^= 0x5c;
976 }
977
978 return 0;
979}
980
/*
 * Generic HMAC setkey helper: run the underlying "mv-*" hash over the
 * ipad/opad blocks derived from @key and export the two intermediate
 * states into @istate/@ostate.
 */
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* Single allocation holding both pads: ipad first, then opad. */
	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
1034
/* tfm init for the HMAC variants: same request ops as plain hashes. */
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
1045
1046static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
1047{
1048 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1049 struct mv_cesa_op_ctx tmpl;
1050
1051 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
1052 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1053
1054 mv_cesa_ahash_init(req, &tmpl);
1055
1056 return 0;
1057}
1058
/*
 * Compute the HMAC ipad/opad intermediate states for @key and store
 * them in the tfm context IV area for use by the init handler.
 */
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	/* The opad state is stored at a fixed offset of 8 words —
	 * presumably the hardware IV layout; TODO confirm. */
	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
1078
/* One-shot hmac(sha1): init followed by finup. */
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret = mv_cesa_ahmac_sha1_init(req);

	return ret ? ret : mv_cesa_ahash_finup(req);
}
1089
/* Engine-accelerated hmac(sha1). */
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};