/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES	(DESC_QI_AEAD_GIVENC_LEN + \
				 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
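
/*
 * Sizing sketch (not from the original source): DESC_MAX_USED_LEN is a
 * worst case - the givencrypt shared descriptor template with both keys
 * fully inlined. CAAM_CMD_SZ is one 32-bit descriptor word, so the key
 * material alone accounts for up to CAAM_MAX_KEY_SIZE / 4 =
 * (32 + 128) / 4 = 40 words on top of DESC_QI_AEAD_GIVENC_LEN.
 */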

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

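/*
 * A short gloss on the desc_inline_query() pattern above: bit 0 of
 * inl_mask reports whether data_len[0] (the split authentication key)
 * still fits inline in the shared descriptor, bit 1 does the same for
 * data_len[1] (the cipher key). A key that fits is referenced by its
 * virtual address and inlined; one that does not is referenced through
 * its DMA address instead.
 */
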
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

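/*
 * Layout of ctx->key as built by aead_setkey() above - the DKP path
 * stores the plain auth key in the padded region (the device derives the
 * split key in the descriptor), the gen_split_key() path stores the
 * pre-split key, but the arrangement is the same:
 *
 *	+------------------------------------------+
 *	| auth key, padded to adata.keylen_pad     |
 *	+------------------------------------------+ <- ctx->adata.keylen_pad
 *	| encryption key (keys.enckeylen bytes)    |
 *	+------------------------------------------+
 *
 * For rfc3686 the last CTR_RFC3686_NONCE_SIZE bytes of the encryption
 * key are the nonce, which is what aead_set_sh_desc() points 'nonce' at.
 */
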
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

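/*
 * The rem_bytes test above implements the same budget used throughout
 * this file: a CAAM descriptor buffer holds CAAM_DESC_BYTES_MAX bytes,
 * the job descriptor claims DESC_JOB_IO_LEN of it, and what remains
 * after subtracting the key length decides whether the key can be
 * inlined into the shared descriptor or must be referenced through
 * ctx->key_dma.
 */
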
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

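/*
 * Concrete example for the rfc4106 key convention handled above: a
 * 20-byte key from the API is a 16-byte AES key followed by a 4-byte
 * salt, so ctx->cdata.keylen becomes 16 while the salt stays in
 * ctx->key for the descriptor to fold into the 12-byte GCM nonce
 * (salt || 8-byte IV).
 */
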
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

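/*
 * Worked example for the rfc3686 handling above: a 20-byte API key is
 * {16-byte AES key, 4-byte nonce}, so keylen is trimmed to 16 while
 * cdata.key_virt still covers the nonce bytes that the shared descriptor
 * loads at CONTEXT1 byte offset 16; the IV then starts at offset 20
 * (ctx1_iv_off), with the block counter after it.
 */
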
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

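/*
 * Layout of one qi_cache entry as used by the *_edesc_alloc() functions
 * below (always bounded by CAAM_QI_MEMCACHE_SIZE):
 *
 *	+---------------------------+
 *	| struct *_edesc            |
 *	+---------------------------+ <- edesc->sgt
 *	| qm_sg_ents S/G entries    |
 *	+---------------------------+ <- (u8 *)(sg_table + qm_sg_ents)
 *	| IV (ivsize bytes)         |
 *	+---------------------------+
 */
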
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

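/*
 * The function above is the classic double-checked locking pattern: the
 * unlocked read avoids taking ctx->lock on every request once the driver
 * context exists, and the second read under the lock ensures only one
 * CPU actually creates the driver context for a given operation type.
 */
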
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

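/*
 * Summary of the input S/G table built above, with the assoclen word
 * always first:
 *
 *	sgt[0]  -> edesc->assoclen (4 bytes, CAAM endianness)
 *	sgt[1]  -> IV (only if ivsize != 0)
 *	sgt[..] -> req->src segments
 *	sgt[..] -> req->dst segments (only if dst != src and dst is scattered)
 *
 * which matches in_len = 4 + ivsize + req->assoclen + req->cryptlen; the
 * output side reuses the dst (or src) entries of the same table.
 */
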
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

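/*
 * Note on the assoclen checks above (an assumption drawn from the shared
 * descriptor construction in caamalg_desc.c, not stated in this file):
 * for rfc4106/rfc4543 the crypto API carries the 8-byte sequence IV as
 * the tail of the associated data, and the descriptors subtract those 8
 * bytes from assoclen, so req->assoclen must be at least 8 for that
 * arithmetic to stay non-negative.
 */
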
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);

	/* In case initial IV was generated, copy it in GIVCIPHER request */
	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->nbytes, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

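/*
 * For the plain (non-givencrypt) path above, the S/G table is laid out
 * as input = [IV, src] plus, when needed, a scattered output:
 *
 *	sgt[0]            -> IV
 *	sgt[1..]          -> req->src segments
 *	sgt[dst_sg_idx..] -> req->dst segments (only if dst != src and
 *	                     dst is scattered)
 */
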
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += 1 + mapped_dst_nents;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
			 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
			     sizeof(*sg_table), ivsize + req->nbytes, 0);

	return edesc;
}

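/*
 * In the givencrypt variant above the roles are mirrored: the IV slot
 * sits in front of the destination instead of the source, because the
 * device *writes* the generated IV (iv_dma is mapped DMA_FROM_DEVICE):
 *
 *	input:  [src]		output: [IV, dst]
 */
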
static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

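/*
 * Registration note (a summary of the templates above, not original
 * commentary): entries of type CRYPTO_ALG_TYPE_GIVCIPHER wire up
 * .givencrypt and generate the IV on the device via the GIVENCRYPT
 * driver context, while plain CRYPTO_ALG_TYPE_ABLKCIPHER entries rely
 * on a software IV generator template ("chainiv"/"eseqiv") named in
 * .geniv.
 */
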
1665static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};

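/*
 * Hypothetical usage sketch (not part of this driver): kernel consumers
 * reach the algorithms registered below through the generic crypto API by
 * name; the CAAM-QI implementations win selection via CAAM_CRA_PRIORITY.
 * The tfm/key/authsize values here are illustrative only:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		... build and submit a struct aead_request as usual ...
 *		crypto_free_aead(tfm);
 *	}
 */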
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};

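/*
 * caam_init_common - per-tfm setup shared by the ablkcipher and AEAD
 * paths: bind the session to a job ring, DMA-map the key buffer and seed
 * the descriptor header templates from the algorithm entry.
 */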
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

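	/*
	 * On Era 6+ parts the shared descriptors use the Derived Key
	 * Protocol (DKP), which overwrites the (split) key in place, so
	 * the key buffer must be mapped bidirectionally in that case.
	 */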
	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct list_head alg_list;

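/*
 * Module teardown: unregister every AEAD that made it into the crypto API,
 * then walk alg_list and unregister/free the dynamically allocated
 * ablkcipher entries.
 */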
static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

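/*
 * caam_alg_alloc - turn a caam_alg_template into a registrable crypto_alg,
 * filling in the fields common to all (ablk)cipher entries.
 */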
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

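/*
 * Module init: locate the CAAM controller, bail out unless the QI backend
 * is present (and the part is not DPAA 2.x), then register only those
 * algorithms the detected DES/AES/MD hardware blocks can actually run.
 */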
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");