/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                      ---------------
 * | JobDesc #1  |--------------------->|  ShareDesc  |
 * | *(packet 1) |                      |   (PDB)     |
 * ---------------       |------------->|  (hashKey)  |
 *       .               |              | (cipherKey) |
 *       .               |    |-------->| (operation) |
 * ---------------       |    |         ---------------
 * | JobDesc #2  |-------|    |
 * | *(packet 2) |            |
 * ---------------            |
 *       .                    |
 *       .                    |
 * ---------------            |
 * | JobDesc #3  |------------|
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
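
/*
 * Illustrative sketch only (see init_aead_job()/init_ablkcipher_job() below
 * for the real construction): a job descriptor of the above shape is built
 * with the desc_constr.h helpers roughly as
 *
 *     init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                          HDR_SHARE_DEFER | HDR_REVERSE);
 *     append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *     append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * where src_dma/dst_dma point either at contiguous data or at a sec4
 * scatter/gather table (in_len/out_len are placeholder names here).
 */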

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

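/*
 * Note: DESC_MAX_USED_LEN (in 32-bit words) sizes the sh_desc_enc/dec/givenc
 * buffers in struct caam_ctx below; it covers the largest shared descriptor
 * built here (the givencrypt one) plus a worst-case inlined key.
 */
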
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

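/*
 * ctx->key layout as used below: the MDHA split authentication key (padded
 * to split_key_pad_len) comes first, immediately followed by the class 1
 * encryption key; see append_key_aead() and aead_setkey().
 */
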
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output fifo */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
465
Yuan Kang0e479302011-07-15 11:21:41 +0800466static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800467 const u8 *key, unsigned int keylen)
468{
469 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
470 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
471 struct caam_ctx *ctx = crypto_aead_ctx(aead);
472 struct device *jrdev = ctx->jrdev;
473 struct rtattr *rta = (void *)key;
474 struct crypto_authenc_key_param *param;
475 unsigned int authkeylen;
476 unsigned int enckeylen;
477 int ret = 0;
478
479 param = RTA_DATA(rta);
480 enckeylen = be32_to_cpu(param->enckeylen);
481
482 key += RTA_ALIGN(rta->rta_len);
483 keylen -= RTA_ALIGN(rta->rta_len);
484
485 if (keylen < enckeylen)
486 goto badkey;
487
488 authkeylen = keylen - enckeylen;
489
490 if (keylen > CAAM_MAX_KEY_SIZE)
491 goto badkey;
492
493 /* Pick class 2 key length from algorithm submask */
494 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
495 OP_ALG_ALGSEL_SHIFT] * 2;
496 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
497
498#ifdef DEBUG
499 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
500 keylen, enckeylen, authkeylen);
501 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
502 ctx->split_key_len, ctx->split_key_pad_len);
503 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
504 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
505#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +0800506
Yuan Kang4c1ec1f2012-06-22 19:48:45 -0500507 ret = gen_split_aead_key(ctx, key, authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800508 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +0800509 goto badkey;
510 }
511
512 /* postpend encryption key to auth split key */
513 memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
514
Yuan Kang885e9e22011-07-15 11:21:41 +0800515 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Kim Phillips8e8ec592011-03-13 16:54:26 +0800516 enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +0800517 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +0800518 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +0800519 return -ENOMEM;
520 }
521#ifdef DEBUG
522 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
523 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
524 ctx->split_key_pad_len + enckeylen, 1);
525#endif
526
Kim Phillips8e8ec592011-03-13 16:54:26 +0800527 ctx->enckeylen = enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800528
Yuan Kang1acebad2011-07-15 11:21:42 +0800529 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800530 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +0800531 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Kim Phillips8e8ec592011-03-13 16:54:26 +0800532 enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800533 }
534
535 return ret;
536badkey:
537 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
538 return -EINVAL;
539}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
653
Kim Phillips8e8ec592011-03-13 16:54:26 +0800654/*
Yuan Kang1acebad2011-07-15 11:21:42 +0800655 * aead_edesc - s/w-extended aead descriptor
656 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
Kim Phillips8e8ec592011-03-13 16:54:26 +0800657 * @src_nents: number of segments in input scatterlist
658 * @dst_nents: number of segments in output scatterlist
Yuan Kang1acebad2011-07-15 11:21:42 +0800659 * @iv_dma: dma address of iv for checking continuity and link table
Kim Phillips8e8ec592011-03-13 16:54:26 +0800660 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -0500661 * @sec4_sg_bytes: length of dma mapped sec4_sg space
662 * @sec4_sg_dma: bus physical mapped address of h/w link table
Kim Phillips8e8ec592011-03-13 16:54:26 +0800663 * @hw_desc: the h/w job descriptor followed by any referenced link tables
664 */
Yuan Kang0e479302011-07-15 11:21:41 +0800665struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +0800666 int assoc_nents;
667 int src_nents;
668 int dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +0800669 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -0500670 int sec4_sg_bytes;
671 dma_addr_t sec4_sg_dma;
672 struct sec4_sg_entry *sec4_sg;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800673 u32 hw_desc[0];
674};
675
Yuan Kangacdca312011-07-15 11:21:42 +0800676/*
677 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
678 * @src_nents: number of segments in input scatterlist
679 * @dst_nents: number of segments in output scatterlist
680 * @iv_dma: dma address of iv for checking continuity and link table
681 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -0500682 * @sec4_sg_bytes: length of dma mapped sec4_sg space
683 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +0800684 * @hw_desc: the h/w job descriptor followed by any referenced link tables
685 */
686struct ablkcipher_edesc {
687 int src_nents;
688 int dst_nents;
689 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -0500690 int sec4_sg_bytes;
691 dma_addr_t sec4_sg_dma;
692 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +0800693 u32 hw_desc[0];
694};
695
Yuan Kang1acebad2011-07-15 11:21:42 +0800696static void caam_unmap(struct device *dev, struct scatterlist *src,
697 struct scatterlist *dst, int src_nents, int dst_nents,
Yuan Kanga299c832012-06-22 19:48:46 -0500698 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
699 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +0800700{
Yuan Kang1acebad2011-07-15 11:21:42 +0800701 if (unlikely(dst != src)) {
702 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
703 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800704 } else {
Yuan Kang1acebad2011-07-15 11:21:42 +0800705 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800706 }
707
Yuan Kang1acebad2011-07-15 11:21:42 +0800708 if (iv_dma)
709 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -0500710 if (sec4_sg_bytes)
711 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800712 DMA_TO_DEVICE);
713}
714
Yuan Kang1acebad2011-07-15 11:21:42 +0800715static void aead_unmap(struct device *dev,
716 struct aead_edesc *edesc,
717 struct aead_request *req)
718{
719 struct crypto_aead *aead = crypto_aead_reqtfm(req);
720 int ivsize = crypto_aead_ivsize(aead);
721
722 dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
723
724 caam_unmap(dev, req->src, req->dst,
725 edesc->src_nents, edesc->dst_nents,
Yuan Kanga299c832012-06-22 19:48:46 -0500726 edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
727 edesc->sec4_sg_bytes);
Yuan Kang1acebad2011-07-15 11:21:42 +0800728}
729
Yuan Kangacdca312011-07-15 11:21:42 +0800730static void ablkcipher_unmap(struct device *dev,
731 struct ablkcipher_edesc *edesc,
732 struct ablkcipher_request *req)
733{
734 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
735 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
736
737 caam_unmap(dev, req->src, req->dst,
738 edesc->src_nents, edesc->dst_nents,
Yuan Kanga299c832012-06-22 19:48:46 -0500739 edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
740 edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +0800741}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
Yuan Kang0e479302011-07-15 11:21:41 +08001290
Yuan Kang1acebad2011-07-15 11:21:42 +08001291/*
1292 * allocate and map the aead extended descriptor for aead givencrypt
1293 */
1294static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1295 *greq, int desc_bytes,
1296 u32 *contig_ptr)
1297{
1298 struct aead_request *req = &greq->areq;
1299 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1300 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1301 struct device *jrdev = ctx->jrdev;
1302 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1303 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1304 int assoc_nents, src_nents, dst_nents = 0;
1305 struct aead_edesc *edesc;
1306 dma_addr_t iv_dma = 0;
1307 int sgc;
1308 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1309 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05001310 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Yuan Kang0e479302011-07-15 11:21:41 +08001311
Yuan Kang1acebad2011-07-15 11:21:42 +08001312 assoc_nents = sg_count(req->assoc, req->assoclen);
1313 src_nents = sg_count(req->src, req->cryptlen);
Yuan Kang0e479302011-07-15 11:21:41 +08001314
Yuan Kang1acebad2011-07-15 11:21:42 +08001315 if (unlikely(req->dst != req->src))
1316 dst_nents = sg_count(req->dst, req->cryptlen);
1317
1318 sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
1319 DMA_BIDIRECTIONAL);
1320 if (likely(req->src == req->dst)) {
1321 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1322 DMA_BIDIRECTIONAL);
1323 } else {
1324 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1325 DMA_TO_DEVICE);
1326 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1327 DMA_FROM_DEVICE);
1328 }
1329
1330 /* Check if data are contiguous */
1331 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1332 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1333 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1334 contig &= ~GIV_SRC_CONTIG;
1335 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1336 contig &= ~GIV_DST_CONTIG;
1337 if (unlikely(req->src != req->dst)) {
1338 dst_nents = dst_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001339 sec4_sg_len += 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08001340 }
1341 if (!(contig & GIV_SRC_CONTIG)) {
1342 assoc_nents = assoc_nents ? : 1;
1343 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001344 sec4_sg_len += assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001345 if (likely(req->src == req->dst))
1346 contig &= ~GIV_DST_CONTIG;
1347 }
Yuan Kanga299c832012-06-22 19:48:46 -05001348 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001349
Yuan Kanga299c832012-06-22 19:48:46 -05001350 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001351
1352 /* allocate space for base edesc and hw desc commands, link tables */
1353 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001354 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08001355 if (!edesc) {
1356 dev_err(jrdev, "could not allocate extended descriptor\n");
1357 return ERR_PTR(-ENOMEM);
1358 }
1359
1360 edesc->assoc_nents = assoc_nents;
1361 edesc->src_nents = src_nents;
1362 edesc->dst_nents = dst_nents;
1363 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001364 edesc->sec4_sg_bytes = sec4_sg_bytes;
1365 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1366 desc_bytes;
1367 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1368 sec4_sg_bytes, DMA_TO_DEVICE);
Yuan Kang1acebad2011-07-15 11:21:42 +08001369 *contig_ptr = contig;
1370
Yuan Kanga299c832012-06-22 19:48:46 -05001371 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001372 if (!(contig & GIV_SRC_CONTIG)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001373 sg_to_sec4_sg(req->assoc, assoc_nents,
1374 edesc->sec4_sg +
1375 sec4_sg_index, 0);
1376 sec4_sg_index += assoc_nents;
1377 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001378 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001379 sec4_sg_index += 1;
1380 sg_to_sec4_sg_last(req->src, src_nents,
1381 edesc->sec4_sg +
1382 sec4_sg_index, 0);
1383 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001384 }
1385 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05001386 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001387 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001388 sec4_sg_index += 1;
1389 sg_to_sec4_sg_last(req->dst, dst_nents,
1390 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001391 }
1392
1393 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08001394}
1395
1396static int aead_givencrypt(struct aead_givcrypt_request *areq)
1397{
1398 struct aead_request *req = &areq->areq;
1399 struct aead_edesc *edesc;
1400 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001401 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1402 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001403 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001404 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001405 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001406
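	/*
	 * Encryption appends the ICV to the output, so grow cryptlen by the
	 * auth tag size before sizing the DMA mappings and S/G tables.
	 */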
Yuan Kang1acebad2011-07-15 11:21:42 +08001407 req->cryptlen += ctx->authsize;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001408
1409 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001410 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1411 CAAM_CMD_SZ, &contig);
1412
Kim Phillips8e8ec592011-03-13 16:54:26 +08001413 if (IS_ERR(edesc))
1414 return PTR_ERR(edesc);
1415
Yuan Kang1acebad2011-07-15 11:21:42 +08001416#ifdef DEBUG
1417 print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1418 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1419 req->cryptlen, 1);
1420#endif
1421
1422	/* Create and submit job descriptor */
1423 init_aead_giv_job(ctx->sh_desc_givenc,
1424 ctx->sh_desc_givenc_dma, edesc, req, contig);
1425#ifdef DEBUG
1426 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1427 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1428 desc_bytes(edesc->hw_desc), 1);
1429#endif
1430
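	/*
	 * Hand the job descriptor to a job ring.  On success the request
	 * completes asynchronously via aead_encrypt_done(), so report
	 * -EINPROGRESS; on failure unmap and free the descriptor here.
	 */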
Kim Phillips8e8ec592011-03-13 16:54:26 +08001431 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001432 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1433 if (!ret) {
1434 ret = -EINPROGRESS;
1435 } else {
1436 aead_unmap(jrdev, edesc, req);
1437 kfree(edesc);
1438 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001439
Yuan Kang1acebad2011-07-15 11:21:42 +08001440 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001441}
1442
Yuan Kangacdca312011-07-15 11:21:42 +08001443/*
1444 * allocate and map the extended descriptor for an ablkcipher request
1445 */
1446static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1447 *req, int desc_bytes,
1448 bool *iv_contig_out)
1449{
1450 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1451 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1452 struct device *jrdev = ctx->jrdev;
1453 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1454 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1455 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05001456 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08001457 struct ablkcipher_edesc *edesc;
1458 dma_addr_t iv_dma = 0;
1459 bool iv_contig = false;
1460 int sgc;
1461 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kanga299c832012-06-22 19:48:46 -05001462 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08001463
1464 src_nents = sg_count(req->src, req->nbytes);
1465
1466 if (unlikely(req->dst != req->src))
1467 dst_nents = sg_count(req->dst, req->nbytes);
1468
1469 if (likely(req->src == req->dst)) {
1470 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1471 DMA_BIDIRECTIONAL);
1472 } else {
1473 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
1474 DMA_TO_DEVICE);
1475 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
1476 DMA_FROM_DEVICE);
1477 }
1478
1479 /*
1480	 * Check whether the IV can sit contiguously in front of the source
1481	 * buffer.  If so, hand the hardware one flat buffer; if not, build a
	 * sec4 S/G table that starts with the IV.
1482 */
1483 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1484 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1485 iv_contig = true;
1486 else
1487 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001488 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1489 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001490
1491 /* allocate space for base edesc and hw desc commands, link tables */
1492 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001493 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08001494 if (!edesc) {
1495 dev_err(jrdev, "could not allocate extended descriptor\n");
1496 return ERR_PTR(-ENOMEM);
1497 }
1498
1499 edesc->src_nents = src_nents;
1500 edesc->dst_nents = dst_nents;
Yuan Kanga299c832012-06-22 19:48:46 -05001501 edesc->sec4_sg_bytes = sec4_sg_bytes;
1502 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1503 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08001504
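	/*
	 * Populate the link table: when the IV is not contiguous, an IV
	 * entry followed by the src chain (closed by sg_to_sec4_sg_last());
	 * a separate dst chain follows when dst differs from src.
	 */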
Yuan Kanga299c832012-06-22 19:48:46 -05001505 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08001506 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05001507 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1508 sg_to_sec4_sg_last(req->src, src_nents,
1509 edesc->sec4_sg + 1, 0);
1510 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08001511 }
1512
1513 if (unlikely(dst_nents)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001514 sg_to_sec4_sg_last(req->dst, dst_nents,
1515 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08001516 }
1517
Yuan Kanga299c832012-06-22 19:48:46 -05001518 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1519 sec4_sg_bytes, DMA_TO_DEVICE);
Yuan Kangacdca312011-07-15 11:21:42 +08001520 edesc->iv_dma = iv_dma;
1521
1522#ifdef DEBUG
Yuan Kanga299c832012-06-22 19:48:46 -05001523 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1524 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1525 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08001526#endif
1527
1528 *iv_contig_out = iv_contig;
1529 return edesc;
1530}
1531
1532static int ablkcipher_encrypt(struct ablkcipher_request *req)
1533{
1534 struct ablkcipher_edesc *edesc;
1535 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1536 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1537 struct device *jrdev = ctx->jrdev;
1538 bool iv_contig;
1539 u32 *desc;
1540 int ret = 0;
1541
1542 /* allocate extended descriptor */
1543 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1544 CAAM_CMD_SZ, &iv_contig);
1545 if (IS_ERR(edesc))
1546 return PTR_ERR(edesc);
1547
1548	/* Create and submit job descriptor */
1549 init_ablkcipher_job(ctx->sh_desc_enc,
1550 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1551#ifdef DEBUG
1552 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1553 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1554 desc_bytes(edesc->hw_desc), 1);
1555#endif
1556 desc = edesc->hw_desc;
1557 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1558
1559 if (!ret) {
1560 ret = -EINPROGRESS;
1561 } else {
1562 ablkcipher_unmap(jrdev, edesc, req);
1563 kfree(edesc);
1564 }
1565
1566 return ret;
1567}
1568
1569static int ablkcipher_decrypt(struct ablkcipher_request *req)
1570{
1571 struct ablkcipher_edesc *edesc;
1572 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1573 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1574 struct device *jrdev = ctx->jrdev;
1575 bool iv_contig;
1576 u32 *desc;
1577 int ret = 0;
1578
1579 /* allocate extended descriptor */
1580 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1581 CAAM_CMD_SZ, &iv_contig);
1582 if (IS_ERR(edesc))
1583 return PTR_ERR(edesc);
1584
1585	/* Create and submit job descriptor */
1586 init_ablkcipher_job(ctx->sh_desc_dec,
1587 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1588 desc = edesc->hw_desc;
1589#ifdef DEBUG
1590 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1591 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1592 desc_bytes(edesc->hw_desc), 1);
1593#endif
1594
1595 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1596 if (!ret) {
1597 ret = -EINPROGRESS;
1598 } else {
1599 ablkcipher_unmap(jrdev, edesc, req);
1600 kfree(edesc);
1601 }
1602
1603 return ret;
1604}
1605
Yuan Kang885e9e22011-07-15 11:21:41 +08001606#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08001607#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08001608struct caam_alg_template {
1609 char name[CRYPTO_MAX_ALG_NAME];
1610 char driver_name[CRYPTO_MAX_ALG_NAME];
1611 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08001612 u32 type;
1613 union {
1614 struct ablkcipher_alg ablkcipher;
1615 struct aead_alg aead;
1616 struct blkcipher_alg blkcipher;
1617 struct cipher_alg cipher;
1618 struct compress_alg compress;
1619 struct rng_alg rng;
1620 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001621 u32 class1_alg_type;
1622 u32 class2_alg_type;
1623 u32 alg_op;
1624};
1625
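/*
 * Template table: each entry below describes one algorithm registered with
 * the kernel crypto API, pairing the generic cra_name with the CAAM
 * CLASS1/CLASS2 OPERATION settings used to build its shared descriptors.
 * Consumers reach these through the generic crypto API rather than through
 * this driver directly; as an illustrative (not normative) sketch, the
 * first entry would typically be obtained with something like:
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(md5),cbc(aes))", 0, 0);
 *
 * and shows up in /proc/crypto under the "-caam" driver_name.
 */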
1626static struct caam_alg_template driver_algs[] = {
1627 /* single-pass ipsec_esp descriptor */
1628 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08001629 .name = "authenc(hmac(md5),cbc(aes))",
1630 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1631 .blocksize = AES_BLOCK_SIZE,
1632 .type = CRYPTO_ALG_TYPE_AEAD,
1633 .template_aead = {
1634 .setkey = aead_setkey,
1635 .setauthsize = aead_setauthsize,
1636 .encrypt = aead_encrypt,
1637 .decrypt = aead_decrypt,
1638 .givencrypt = aead_givencrypt,
1639 .geniv = "<built-in>",
1640 .ivsize = AES_BLOCK_SIZE,
1641 .maxauthsize = MD5_DIGEST_SIZE,
1642 },
1643 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1644 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1645 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1646 },
1647 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001648 .name = "authenc(hmac(sha1),cbc(aes))",
1649 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1650 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001651 .type = CRYPTO_ALG_TYPE_AEAD,
1652 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001653 .setkey = aead_setkey,
1654 .setauthsize = aead_setauthsize,
1655 .encrypt = aead_encrypt,
1656 .decrypt = aead_decrypt,
1657 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001658 .geniv = "<built-in>",
1659 .ivsize = AES_BLOCK_SIZE,
1660 .maxauthsize = SHA1_DIGEST_SIZE,
1661 },
1662 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1663 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1664 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1665 },
1666 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001667 .name = "authenc(hmac(sha224),cbc(aes))",
1668 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1669		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1670 .template_aead = {
1671 .setkey = aead_setkey,
1672 .setauthsize = aead_setauthsize,
1673 .encrypt = aead_encrypt,
1674 .decrypt = aead_decrypt,
1675 .givencrypt = aead_givencrypt,
1676 .geniv = "<built-in>",
1677 .ivsize = AES_BLOCK_SIZE,
1678 .maxauthsize = SHA224_DIGEST_SIZE,
1679 },
1680 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1681 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1682 OP_ALG_AAI_HMAC_PRECOMP,
1683 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1684 },
1685 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001686 .name = "authenc(hmac(sha256),cbc(aes))",
1687 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1688 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001689 .type = CRYPTO_ALG_TYPE_AEAD,
1690 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001691 .setkey = aead_setkey,
1692 .setauthsize = aead_setauthsize,
1693 .encrypt = aead_encrypt,
1694 .decrypt = aead_decrypt,
1695 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001696 .geniv = "<built-in>",
1697 .ivsize = AES_BLOCK_SIZE,
1698 .maxauthsize = SHA256_DIGEST_SIZE,
1699 },
1700 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1701 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1702 OP_ALG_AAI_HMAC_PRECOMP,
1703 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1704 },
1705 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001706 .name = "authenc(hmac(sha384),cbc(aes))",
1707 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1708		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1709 .template_aead = {
1710 .setkey = aead_setkey,
1711 .setauthsize = aead_setauthsize,
1712 .encrypt = aead_encrypt,
1713 .decrypt = aead_decrypt,
1714 .givencrypt = aead_givencrypt,
1715 .geniv = "<built-in>",
1716 .ivsize = AES_BLOCK_SIZE,
1717 .maxauthsize = SHA384_DIGEST_SIZE,
1718 },
1719 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1720 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1721 OP_ALG_AAI_HMAC_PRECOMP,
1722 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1723 },
1724
1725 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05001726 .name = "authenc(hmac(sha512),cbc(aes))",
1727 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1728 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001729 .type = CRYPTO_ALG_TYPE_AEAD,
1730 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001731 .setkey = aead_setkey,
1732 .setauthsize = aead_setauthsize,
1733 .encrypt = aead_encrypt,
1734 .decrypt = aead_decrypt,
1735 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05001736 .geniv = "<built-in>",
1737 .ivsize = AES_BLOCK_SIZE,
1738 .maxauthsize = SHA512_DIGEST_SIZE,
1739 },
1740 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1741 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1742 OP_ALG_AAI_HMAC_PRECOMP,
1743 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1744 },
1745 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08001746 .name = "authenc(hmac(md5),cbc(des3_ede))",
1747 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1748 .blocksize = DES3_EDE_BLOCK_SIZE,
1749 .type = CRYPTO_ALG_TYPE_AEAD,
1750 .template_aead = {
1751 .setkey = aead_setkey,
1752 .setauthsize = aead_setauthsize,
1753 .encrypt = aead_encrypt,
1754 .decrypt = aead_decrypt,
1755 .givencrypt = aead_givencrypt,
1756 .geniv = "<built-in>",
1757 .ivsize = DES3_EDE_BLOCK_SIZE,
1758 .maxauthsize = MD5_DIGEST_SIZE,
1759 },
1760 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1761 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1762 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1763 },
1764 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001765 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1766 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1767 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001768 .type = CRYPTO_ALG_TYPE_AEAD,
1769 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001770 .setkey = aead_setkey,
1771 .setauthsize = aead_setauthsize,
1772 .encrypt = aead_encrypt,
1773 .decrypt = aead_decrypt,
1774 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001775 .geniv = "<built-in>",
1776 .ivsize = DES3_EDE_BLOCK_SIZE,
1777 .maxauthsize = SHA1_DIGEST_SIZE,
1778 },
1779 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1780 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1781 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1782 },
1783 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001784 .name = "authenc(hmac(sha224),cbc(des3_ede))",
1785 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1786		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1787 .template_aead = {
1788 .setkey = aead_setkey,
1789 .setauthsize = aead_setauthsize,
1790 .encrypt = aead_encrypt,
1791 .decrypt = aead_decrypt,
1792 .givencrypt = aead_givencrypt,
1793 .geniv = "<built-in>",
1794 .ivsize = DES3_EDE_BLOCK_SIZE,
1795 .maxauthsize = SHA224_DIGEST_SIZE,
1796 },
1797 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1798 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1799 OP_ALG_AAI_HMAC_PRECOMP,
1800 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1801 },
1802 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001803 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1804 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1805 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001806 .type = CRYPTO_ALG_TYPE_AEAD,
1807 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001808 .setkey = aead_setkey,
1809 .setauthsize = aead_setauthsize,
1810 .encrypt = aead_encrypt,
1811 .decrypt = aead_decrypt,
1812 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001813 .geniv = "<built-in>",
1814 .ivsize = DES3_EDE_BLOCK_SIZE,
1815 .maxauthsize = SHA256_DIGEST_SIZE,
1816 },
1817 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1818 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1819 OP_ALG_AAI_HMAC_PRECOMP,
1820 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1821 },
1822 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001823 .name = "authenc(hmac(sha384),cbc(des3_ede))",
1824 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1825		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1826 .template_aead = {
1827 .setkey = aead_setkey,
1828 .setauthsize = aead_setauthsize,
1829 .encrypt = aead_encrypt,
1830 .decrypt = aead_decrypt,
1831 .givencrypt = aead_givencrypt,
1832 .geniv = "<built-in>",
1833 .ivsize = DES3_EDE_BLOCK_SIZE,
1834 .maxauthsize = SHA384_DIGEST_SIZE,
1835 },
1836 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1837 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1838 OP_ALG_AAI_HMAC_PRECOMP,
1839 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1840 },
1841 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05001842 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1843 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1844 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001845 .type = CRYPTO_ALG_TYPE_AEAD,
1846 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001847 .setkey = aead_setkey,
1848 .setauthsize = aead_setauthsize,
1849 .encrypt = aead_encrypt,
1850 .decrypt = aead_decrypt,
1851 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05001852 .geniv = "<built-in>",
1853 .ivsize = DES3_EDE_BLOCK_SIZE,
1854 .maxauthsize = SHA512_DIGEST_SIZE,
1855 },
1856 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1857 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1858 OP_ALG_AAI_HMAC_PRECOMP,
1859 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1860 },
1861 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08001862 .name = "authenc(hmac(md5),cbc(des))",
1863 .driver_name = "authenc-hmac-md5-cbc-des-caam",
1864 .blocksize = DES_BLOCK_SIZE,
1865 .type = CRYPTO_ALG_TYPE_AEAD,
1866 .template_aead = {
1867 .setkey = aead_setkey,
1868 .setauthsize = aead_setauthsize,
1869 .encrypt = aead_encrypt,
1870 .decrypt = aead_decrypt,
1871 .givencrypt = aead_givencrypt,
1872 .geniv = "<built-in>",
1873 .ivsize = DES_BLOCK_SIZE,
1874 .maxauthsize = MD5_DIGEST_SIZE,
1875 },
1876 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1877 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1878 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1879 },
1880 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001881 .name = "authenc(hmac(sha1),cbc(des))",
1882 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1883 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001884 .type = CRYPTO_ALG_TYPE_AEAD,
1885 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001886 .setkey = aead_setkey,
1887 .setauthsize = aead_setauthsize,
1888 .encrypt = aead_encrypt,
1889 .decrypt = aead_decrypt,
1890 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001891 .geniv = "<built-in>",
1892 .ivsize = DES_BLOCK_SIZE,
1893 .maxauthsize = SHA1_DIGEST_SIZE,
1894 },
1895 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1896 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1897 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1898 },
1899 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001900 .name = "authenc(hmac(sha224),cbc(des))",
1901 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1902		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1903 .template_aead = {
1904 .setkey = aead_setkey,
1905 .setauthsize = aead_setauthsize,
1906 .encrypt = aead_encrypt,
1907 .decrypt = aead_decrypt,
1908 .givencrypt = aead_givencrypt,
1909 .geniv = "<built-in>",
1910 .ivsize = DES_BLOCK_SIZE,
1911 .maxauthsize = SHA224_DIGEST_SIZE,
1912 },
1913 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1914 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1915 OP_ALG_AAI_HMAC_PRECOMP,
1916 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1917 },
1918 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001919 .name = "authenc(hmac(sha256),cbc(des))",
1920 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1921 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001922 .type = CRYPTO_ALG_TYPE_AEAD,
1923 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001924 .setkey = aead_setkey,
1925 .setauthsize = aead_setauthsize,
1926 .encrypt = aead_encrypt,
1927 .decrypt = aead_decrypt,
1928 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001929 .geniv = "<built-in>",
1930 .ivsize = DES_BLOCK_SIZE,
1931 .maxauthsize = SHA256_DIGEST_SIZE,
1932 },
1933 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1934 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1935 OP_ALG_AAI_HMAC_PRECOMP,
1936 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1937 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05001938 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001939 .name = "authenc(hmac(sha384),cbc(des))",
1940 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1941		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1942 .template_aead = {
1943 .setkey = aead_setkey,
1944 .setauthsize = aead_setauthsize,
1945 .encrypt = aead_encrypt,
1946 .decrypt = aead_decrypt,
1947 .givencrypt = aead_givencrypt,
1948 .geniv = "<built-in>",
1949 .ivsize = DES_BLOCK_SIZE,
1950 .maxauthsize = SHA384_DIGEST_SIZE,
1951 },
1952 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1953 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1954 OP_ALG_AAI_HMAC_PRECOMP,
1955 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1956 },
1957 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05001958 .name = "authenc(hmac(sha512),cbc(des))",
1959 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1960 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001961 .type = CRYPTO_ALG_TYPE_AEAD,
1962 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001963 .setkey = aead_setkey,
1964 .setauthsize = aead_setauthsize,
1965 .encrypt = aead_encrypt,
1966 .decrypt = aead_decrypt,
1967 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05001968 .geniv = "<built-in>",
1969 .ivsize = DES_BLOCK_SIZE,
1970 .maxauthsize = SHA512_DIGEST_SIZE,
1971 },
1972 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1973 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1974 OP_ALG_AAI_HMAC_PRECOMP,
1975 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1976 },
Yuan Kangacdca312011-07-15 11:21:42 +08001977 /* ablkcipher descriptor */
1978 {
1979 .name = "cbc(aes)",
1980 .driver_name = "cbc-aes-caam",
1981 .blocksize = AES_BLOCK_SIZE,
1982 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1983 .template_ablkcipher = {
1984 .setkey = ablkcipher_setkey,
1985 .encrypt = ablkcipher_encrypt,
1986 .decrypt = ablkcipher_decrypt,
1987 .geniv = "eseqiv",
1988 .min_keysize = AES_MIN_KEY_SIZE,
1989 .max_keysize = AES_MAX_KEY_SIZE,
1990 .ivsize = AES_BLOCK_SIZE,
1991 },
1992 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1993 },
1994 {
1995 .name = "cbc(des3_ede)",
1996 .driver_name = "cbc-3des-caam",
1997 .blocksize = DES3_EDE_BLOCK_SIZE,
1998 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1999 .template_ablkcipher = {
2000 .setkey = ablkcipher_setkey,
2001 .encrypt = ablkcipher_encrypt,
2002 .decrypt = ablkcipher_decrypt,
2003 .geniv = "eseqiv",
2004 .min_keysize = DES3_EDE_KEY_SIZE,
2005 .max_keysize = DES3_EDE_KEY_SIZE,
2006 .ivsize = DES3_EDE_BLOCK_SIZE,
2007 },
2008 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2009 },
2010 {
2011 .name = "cbc(des)",
2012 .driver_name = "cbc-des-caam",
2013 .blocksize = DES_BLOCK_SIZE,
2014 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2015 .template_ablkcipher = {
2016 .setkey = ablkcipher_setkey,
2017 .encrypt = ablkcipher_encrypt,
2018 .decrypt = ablkcipher_decrypt,
2019 .geniv = "eseqiv",
2020 .min_keysize = DES_KEY_SIZE,
2021 .max_keysize = DES_KEY_SIZE,
2022 .ivsize = DES_BLOCK_SIZE,
2023 },
2024 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2025 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002026};
2027
2028struct caam_crypto_alg {
2029 struct list_head entry;
2030 struct device *ctrldev;
2031 int class1_alg_type;
2032 int class2_alg_type;
2033 int alg_op;
2034 struct crypto_alg crypto_alg;
2035};
2036
2037static int caam_cra_init(struct crypto_tfm *tfm)
2038{
2039 struct crypto_alg *alg = tfm->__crt_alg;
2040 struct caam_crypto_alg *caam_alg =
2041 container_of(alg, struct caam_crypto_alg, crypto_alg);
2042 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2043 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2044 int tgt_jr = atomic_inc_return(&priv->tfm_count);
2045
2046 /*
2047 * distribute tfms across job rings to ensure in-order
2048 * crypto request processing per tfm
2049 */
Yuan Kang8009a382012-06-22 19:48:44 -05002050 ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
Kim Phillips8e8ec592011-03-13 16:54:26 +08002051
2052 /* copy descriptor header template value */
2053 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2054 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2055 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2056
2057 return 0;
2058}
2059
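/*
 * Undo the per-tfm DMA mappings for whichever shared descriptors
 * (encrypt, decrypt, givencrypt) were set up for this tfm.
 */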
2060static void caam_cra_exit(struct crypto_tfm *tfm)
2061{
2062 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2063
Yuan Kang1acebad2011-07-15 11:21:42 +08002064 if (ctx->sh_desc_enc_dma &&
2065 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2066 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2067 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2068 if (ctx->sh_desc_dec_dma &&
2069 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2070 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2071 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2072 if (ctx->sh_desc_givenc_dma &&
2073 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2074 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2075 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05002076 DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002077}
2078
2079static void __exit caam_algapi_exit(void)
2080{
2081
2082 struct device_node *dev_node;
2083 struct platform_device *pdev;
2084 struct device *ctrldev;
2085 struct caam_drv_private *priv;
2086 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002087
Kim Phillips54e198d2011-03-23 21:15:44 +08002088 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
Shengzhou Liua0ea0f62012-03-21 14:09:10 +08002089 if (!dev_node) {
2090 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2091 if (!dev_node)
2092 return;
2093 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002094
2095 pdev = of_find_device_by_node(dev_node);
2096 if (!pdev)
2097 return;
2098
2099 ctrldev = &pdev->dev;
2100 of_node_put(dev_node);
2101 priv = dev_get_drvdata(ctrldev);
2102
2103 if (!priv->alg_list.next)
2104 return;
2105
2106 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2107 crypto_unregister_alg(&t_alg->crypto_alg);
2108 list_del(&t_alg->entry);
2109 kfree(t_alg);
2110 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002111}
2112
2113static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2114 struct caam_alg_template
2115 *template)
2116{
2117 struct caam_crypto_alg *t_alg;
2118 struct crypto_alg *alg;
2119
2120 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2121 if (!t_alg) {
2122 dev_err(ctrldev, "failed to allocate t_alg\n");
2123 return ERR_PTR(-ENOMEM);
2124 }
2125
2126 alg = &t_alg->crypto_alg;
2127
2128 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2129 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2130 template->driver_name);
2131 alg->cra_module = THIS_MODULE;
2132 alg->cra_init = caam_cra_init;
2133 alg->cra_exit = caam_cra_exit;
2134 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002135 alg->cra_blocksize = template->blocksize;
2136 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002137 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01002138 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2139 template->type;
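	/*
	 * Which union member gets copied into the crypto_alg depends on the
	 * crypto API type requested by the template (ablkcipher vs. AEAD).
	 */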
Yuan Kang885e9e22011-07-15 11:21:41 +08002140 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08002141 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2142 alg->cra_type = &crypto_ablkcipher_type;
2143 alg->cra_ablkcipher = template->template_ablkcipher;
2144 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08002145 case CRYPTO_ALG_TYPE_AEAD:
2146 alg->cra_type = &crypto_aead_type;
2147 alg->cra_aead = template->template_aead;
2148 break;
2149 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002150
2151 t_alg->class1_alg_type = template->class1_alg_type;
2152 t_alg->class2_alg_type = template->class2_alg_type;
2153 t_alg->alg_op = template->alg_op;
2154 t_alg->ctrldev = ctrldev;
2155
2156 return t_alg;
2157}
2158
2159static int __init caam_algapi_init(void)
2160{
2161 struct device_node *dev_node;
2162 struct platform_device *pdev;
Yuan Kang8009a382012-06-22 19:48:44 -05002163 struct device *ctrldev;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002164 struct caam_drv_private *priv;
2165 int i = 0, err = 0;
2166
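	/*
	 * The CAAM/SEC-4.x controller node may be described by either
	 * compatible string, so accept both.
	 */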
Kim Phillips54e198d2011-03-23 21:15:44 +08002167 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
Shengzhou Liua0ea0f62012-03-21 14:09:10 +08002168 if (!dev_node) {
2169 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2170 if (!dev_node)
2171 return -ENODEV;
2172 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002173
2174 pdev = of_find_device_by_node(dev_node);
2175 if (!pdev)
2176 return -ENODEV;
2177
2178 ctrldev = &pdev->dev;
2179 priv = dev_get_drvdata(ctrldev);
2181
2182 INIT_LIST_HEAD(&priv->alg_list);
2183
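	/*
	 * tfm_count starts at -1 so the first atomic_inc_return() in
	 * caam_cra_init() yields 0; it is used there to spread tfms across
	 * the available job rings.
	 */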
Kim Phillips8e8ec592011-03-13 16:54:26 +08002184 atomic_set(&priv->tfm_count, -1);
2185
2186 /* register crypto algorithms the device supports */
2187 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2188 /* TODO: check if h/w supports alg */
2189 struct caam_crypto_alg *t_alg;
2190
2191 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2192 if (IS_ERR(t_alg)) {
2193 err = PTR_ERR(t_alg);
2194 dev_warn(ctrldev, "%s alg allocation failed\n",
Dan Carpentercdc712d2011-03-23 21:20:27 +08002195 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002196 continue;
2197 }
2198
2199 err = crypto_register_alg(&t_alg->crypto_alg);
2200 if (err) {
2201 dev_warn(ctrldev, "%s alg registration failed\n",
2202 t_alg->crypto_alg.cra_driver_name);
2203 kfree(t_alg);
Kim Phillips01135292012-01-09 18:26:49 -06002204 } else
Kim Phillips8e8ec592011-03-13 16:54:26 +08002205 list_add_tail(&t_alg->entry, &priv->alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002206 }
Kim Phillips01135292012-01-09 18:26:49 -06002207 if (!list_empty(&priv->alg_list))
2208 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2209			 (char *)of_get_property(dev_node, "compatible", NULL));
	of_node_put(dev_node);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002210
2211 return err;
2212}
2213
2214module_init(caam_algapi_init);
2215module_exit(caam_algapi_exit);
2216
2217MODULE_LICENSE("GPL");
2218MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2219MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");