/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

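/*
 * Per-request iterator state used when building a TDMA descriptor chain:
 * it tracks the overall progress of the request (base) and the current
 * positions in the source and destination scatterlists.
 */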
struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

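/*
 * Standard (CPU-driven) mode step: copy the next chunk of input data (at
 * most CESA_SA_SRAM_PAYLOAD_SIZE bytes) into the engine SRAM, update the
 * operation descriptor with the chunk length and kick the accelerator.
 * The full operation context only has to be written to SRAM once; for
 * subsequent chunks only the descriptor part is rewritten.
 */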
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

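/*
 * Standard-mode completion handler: copy the processed chunk back from
 * the engine SRAM into the destination scatterlist, and return
 * -EINPROGRESS until the whole request has been handled.
 */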
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

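/*
 * Dispatch completion handling to the DMA or standard backend. Once the
 * request is complete, the (possibly updated) IV is copied back from the
 * engine SRAM into req->info, as the ablkcipher API expects.
 */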
static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma, status);
	else
		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

	if (ret)
		return ret;

	memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
	       crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

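/*
 * Expand the AES key with the generic helper and complete the decryption
 * key material: for 192- and 256-bit keys the trailing words of the
 * expanded encryption schedule are appended to key_dec (nothing needs to
 * be copied for 128-bit keys, where remaining is 0).
 */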
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

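/*
 * DES setkey: enforce the fixed key size, reject weak keys when the
 * transform requests it (CRYPTO_TFM_REQ_WEAK_KEY), and stash the raw key
 * for later insertion into the operation descriptor.
 */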
static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

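/*
 * DMA mode: map the source and destination scatterlists (a single
 * bidirectional mapping is used when they are the same), then build a
 * TDMA chain that, for each SRAM-sized chunk, copies the input into
 * SRAM, launches the crypto operation and copies the result back out.
 */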
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;
	struct mv_cesa_ablkcipher_dma_iter iter;
	struct mv_cesa_tdma_chain chain;
	bool skip_ctx = false;
	int ret;

	dreq->base.type = CESA_DMA_REQ;
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

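/*
 * Common request initialization: check block alignment, count the
 * scatterlist entries, force the crypt-only operation, and pick the DMA
 * backend when the engine supports TDMA, falling back to the CPU-driven
 * (standard) mode otherwise.
 */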
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	/* TODO: add a threshold for DMA usage */
	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

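/*
 * Fill the operation template with the DES algorithm and key, then
 * initialize and queue the request. On immediate failure (anything but
 * -EINPROGRESS) the request resources are released here.
 */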
static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

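/*
 * CBC variant of the DES operation: select CBC chaining and copy the
 * request IV into the operation context before handing over to
 * mv_cesa_des_op().
 */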
static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

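/*
 * AES counterpart of mv_cesa_des_op(): pick the encryption or decryption
 * key schedule depending on the requested direction, load it into the
 * operation context and encode the key length in the descriptor config
 * before initializing and queuing the request.
 */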
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};