/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
        SAFEXCEL_ENCRYPT,
        SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        enum safexcel_cipher_direction direction;
        u32 mode;

        __le32 key[8];
        unsigned int key_len;
};

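/*
 * Build the command token for a cipher request. In CBC mode the IV is
 * first copied into the token area of the command descriptor; the
 * crypto instruction (direction, packet length, last/output flags) is
 * written right after it.
 */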
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
                                  u32 length)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_token *token;
        unsigned offset = 0;

        if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
                offset = AES_BLOCK_SIZE / sizeof(u32);
                memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

                cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
        }

        token = (struct safexcel_token *)(cdesc->control_data.token + offset);

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
        token[0].instructions = EIP197_TOKEN_INS_LAST |
                                EIP197_TOKEN_INS_TYPE_CRYTO |
                                EIP197_TOKEN_INS_TYPE_OUTPUT;
}

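/*
 * Expand the AES key and store its little-endian form in the software
 * context. If the new key differs from the one previously programmed,
 * mark the context so it gets invalidated before the next request uses
 * it.
 */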
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
                               unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aes_ctx aes;
        int ret, i;

        ret = crypto_aes_expand_key(&aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        for (i = 0; i < len / sizeof(u32); i++) {
                if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        for (i = 0; i < len / sizeof(u32); i++)
                ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

        ctx->key_len = len;

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}

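/*
 * Set up the context control words for this request: crypto direction
 * (outbound for encryption, inbound for decryption), cipher mode, and
 * the AES variant, whose key size also determines the context size
 * programmed into the descriptor.
 */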
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
                                    struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ctrl_size;

        if (ctx->direction == SAFEXCEL_ENCRYPT)
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
        else
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
        cdesc->control_data.control1 |= ctx->mode;

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
                ctrl_size = 4;
                break;
        case AES_KEYSIZE_192:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
                ctrl_size = 6;
                break;
        case AES_KEYSIZE_256:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
                ctrl_size = 8;
                break;
        default:
                dev_err(priv->dev, "aes keysize not supported: %u\n",
                        ctx->key_len);
                return -EINVAL;
        }
        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

        return 0;
}

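/*
 * Handle the result descriptors of a completed cipher request: report
 * any error code returned by the engine, then unmap the source and
 * destination scatterlists and mark the request as ready to complete.
 */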
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: result: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev,
                                "cipher: result: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        *should_complete = true;

        return ndesc;
}

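/*
 * Queue a cipher request on a ring: DMA-map the scatterlists, copy the
 * key into the context record, then emit one command descriptor per
 * source segment (the first one also carries the context control words
 * and the token) and one result descriptor per destination segment.
 * On failure, descriptors already written are rolled back and the
 * buffers unmapped.
 */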
static int safexcel_aes_send(struct crypto_async_request *async,
                             int ring, struct safexcel_request *request,
                             int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
        int i, ret = 0;

        request->req = &req->base;

        if (req->src == req->dst) {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;

                nr_dst = dma_map_sg(priv->dev, req->dst,
                                    sg_nents_for_len(req->dst, req->cryptlen),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
                        dma_unmap_sg(priv->dev, req->src,
                                     sg_nents_for_len(req->src, req->cryptlen),
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }

        memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* command descriptors */
        for_each_sg(req->src, sg, nr_src, i) {
                int len = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - len < 0)
                        len = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
                                           sg_dma_address(sg), len, req->cryptlen,
                                           ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        /* No space left in the command descriptor ring */
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1) {
                        safexcel_context_control(ctx, cdesc);
                        safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
                }

                queued -= len;
                if (!queued)
                        break;
        }

        /* result descriptors */
        for_each_sg(req->dst, sg, nr_dst, i) {
                bool first = !i, last = (i == nr_dst - 1);
                u32 len = sg_dma_len(sg);

                rdesc = safexcel_add_rdesc(priv, ring, first, last,
                                           sg_dma_address(sg), len);
                if (IS_ERR(rdesc)) {
                        /* No space left in the result descriptor ring */
                        ret = PTR_ERR(rdesc);
                        goto rdesc_rollback;
                }
                n_rdesc++;
        }

        ctx->base.handle_result = safexcel_handle_result;

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        *commands = n_cdesc;
        *results = nr_dst;
        return 0;

rdesc_rollback:
        for (i = 0; i < n_rdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        return ret;
}

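/*
 * Handle the result of a context invalidation. On transform teardown
 * (exit_inv) the context record is freed; otherwise the request that
 * triggered the invalidation is re-queued so it can now be processed
 * with the updated context.
 */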
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0, enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: invalidate: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;

                return ndesc;
        }

        ctx->base.needs_inv = false;
        ctx->base.ring = safexcel_select_ring(priv);
        ctx->base.send = safexcel_aes_send;

        spin_lock_bh(&priv->lock);
        enq_ret = crypto_enqueue_request(&priv->queue, async);
        spin_unlock_bh(&priv->lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        priv->need_dequeue = true;
        *should_complete = false;

        return ndesc;
}

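/*
 * Queue a single invalidation command for this context; it produces
 * exactly one command and one result descriptor.
 */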
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ctx->base.handle_result = safexcel_handle_inv_result;

        ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

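/*
 * Synchronously invalidate the context record at transform exit time:
 * a dummy skcipher request is built on the stack, queued as an
 * invalidation request, and waited for via a completion.
 */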
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request req;
        struct safexcel_inv_result result = { 0 };

        memset(&req, 0, sizeof(struct skcipher_request));

        /* create invalidation request */
        init_completion(&result.completion);
        skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      safexcel_inv_complete, &result);

        skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
        ctx = crypto_tfm_ctx(req.base.tfm);
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_cipher_send_inv;

        spin_lock_bh(&priv->lock);
        crypto_enqueue_request(&priv->queue, &req.base);
        spin_unlock_bh(&priv->lock);

        if (!priv->need_dequeue)
                safexcel_dequeue(priv);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev,
                         "cipher: sync: invalidate: completion error %d\n",
                         result.error);
                return result.error;
        }

        return 0;
}

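/*
 * Common path for the ECB/CBC encrypt and decrypt entry points: choose
 * the send handler (invalidation first if the key changed), allocate
 * the context record on first use, then enqueue the request and call
 * safexcel_dequeue() unless a dequeue is already pending.
 */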
static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ctx->direction = dir;
        ctx->mode = mode;

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv)
                        ctx->base.send = safexcel_cipher_send_inv;
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.send = safexcel_aes_send;

                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        spin_lock_bh(&priv->lock);
        ret = crypto_enqueue_request(&priv->queue, &req->base);
        spin_unlock_bh(&priv->lock);

        if (!priv->need_dequeue)
                safexcel_dequeue(priv);

        return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(tfm->__crt_alg, struct safexcel_alg_template,
                             alg.skcipher.base);

        ctx->priv = tmpl->priv;

        return 0;
}

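/*
 * Transform teardown: wipe the key from the software context and from
 * the context record, then invalidate the record before it is freed.
 */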
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        memzero_explicit(ctx->key, 8 * sizeof(u32));

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

        ret = safexcel_cipher_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_ecb_aes_encrypt,
                .decrypt = safexcel_ecb_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "safexcel-ecb-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_cbc_aes_encrypt,
                .decrypt = safexcel_cbc_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "safexcel-cbc-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};