/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
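/*
 * Illustrative sketch only (not part of the driver flow): a job descriptor
 * with the layout above is what the init_aead_job()/init_ablkcipher_job()
 * helpers further down emit at request time, roughly:
 *
 *	u32 *desc = edesc->hw_desc;
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * src_dma/dst_dma/in_len/out_len/in_options/out_options are placeholders
 * here; the real code derives them from the per-request DMA mapping.
 */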

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
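/*
 * Rough worked example of the sizing above, assuming CAAM_CMD_SZ is 4
 * (one 32-bit descriptor word, per desc_constr.h):
 *
 *	CAAM_MAX_KEY_SIZE    = 32 + 2 * 64      = 160 bytes
 *	DESC_AEAD_GIVENC_LEN = (4 + 15 + 7) * 4 = 104 bytes
 *	DESC_MAX_USED_BYTES  = 104 + 160        = 264 bytes
 *	DESC_MAX_USED_LEN    = 264 / 4          =  66 words
 *
 * i.e. the sh_desc_*[] buffers in struct caam_ctx are sized for the worst
 * case of a givencrypt shared descriptor with all key material inlined.
 */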

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
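/*
 * Sketch of the command stream append_dec_op1() emits (illustrative, not
 * literal descriptor words):
 *
 *	JUMP if SHRD -------------------.
 *	OPERATION decrypt (no DK)       |
 *	JUMP always --------------.     |
 *	OPERATION decrypt, AAI_DK <-----'
 *	<next command> <----------'
 *
 * i.e. the Decrypt Key (DK) form of the class 1 operation is selected only
 * on the shared path (JUMP_COND_SHRD), as the comment above describes.
 */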

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
191static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
192 int keys_fit_inline)
193{
194 u32 *key_jump_cmd;
195
Kim Phillips61bb86b2012-07-13 17:49:28 -0500196 init_sh_desc(desc, HDR_SHARE_SERIAL);
Yuan Kang1acebad2011-07-15 11:21:42 +0800197
198 /* Skip if already shared */
199 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
200 JUMP_COND_SHRD);
201
202 append_key_aead(desc, ctx, keys_fit_inline);
203
204 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kang1acebad2011-07-15 11:21:42 +0800205}
206
Horia Geantaae4a8252014-03-14 17:46:52 +0200207static int aead_null_set_sh_desc(struct crypto_aead *aead)
208{
209 struct aead_tfm *tfm = &aead->base.crt_aead;
210 struct caam_ctx *ctx = crypto_aead_ctx(aead);
211 struct device *jrdev = ctx->jrdev;
212 bool keys_fit_inline = false;
213 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
214 u32 *desc;
215
216 /*
217 * Job Descriptor and Shared Descriptors
218 * must all fit into the 64-word Descriptor h/w Buffer
219 */
220 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
221 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
222 keys_fit_inline = true;
223
224 /* aead_encrypt shared descriptor */
225 desc = ctx->sh_desc_enc;
226
227 init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229 /* Skip if already shared */
230 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
231 JUMP_COND_SHRD);
232 if (keys_fit_inline)
233 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
234 ctx->split_key_len, CLASS_2 |
235 KEY_DEST_MDHA_SPLIT | KEY_ENC);
236 else
237 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
238 KEY_DEST_MDHA_SPLIT | KEY_ENC);
239 set_jump_tgt_here(desc, key_jump_cmd);
240
241 /* cryptlen = seqoutlen - authsize */
242 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
243
244 /*
245 * NULL encryption; IV is zero
246 * assoclen = (assoclen + cryptlen) - cryptlen
247 */
248 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
249
250 /* read assoc before reading payload */
251 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
252 KEY_VLF);
253
254 /* Prepare to read and write cryptlen bytes */
255 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
256 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
257
258 /*
259 * MOVE_LEN opcode is not available in all SEC HW revisions,
260 * thus need to do some magic, i.e. self-patch the descriptor
261 * buffer.
262 */
263 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
264 MOVE_DEST_MATH3 |
265 (0x6 << MOVE_LEN_SHIFT));
266 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
267 MOVE_DEST_DESCBUF |
268 MOVE_WAITCOMP |
269 (0x8 << MOVE_LEN_SHIFT));
270
271 /* Class 2 operation */
272 append_operation(desc, ctx->class2_alg_type |
273 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
274
275 /* Read and write cryptlen bytes */
276 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
277
278 set_move_tgt_here(desc, read_move_cmd);
279 set_move_tgt_here(desc, write_move_cmd);
280 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
281 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
282 MOVE_AUX_LS);
283
284 /* Write ICV */
285 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
286 LDST_SRCDST_BYTE_CONTEXT);
287
288 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
289 desc_bytes(desc),
290 DMA_TO_DEVICE);
291 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
292 dev_err(jrdev, "unable to map shared descriptor\n");
293 return -ENOMEM;
294 }
295#ifdef DEBUG
296 print_hex_dump(KERN_ERR,
297 "aead null enc shdesc@"__stringify(__LINE__)": ",
298 DUMP_PREFIX_ADDRESS, 16, 4, desc,
299 desc_bytes(desc), 1);
300#endif
301
302 /*
303 * Job Descriptor and Shared Descriptors
304 * must all fit into the 64-word Descriptor h/w Buffer
305 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500306 keys_fit_inline = false;
Horia Geantaae4a8252014-03-14 17:46:52 +0200307 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
308 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
309 keys_fit_inline = true;
310
311 desc = ctx->sh_desc_dec;
312
313 /* aead_decrypt shared descriptor */
314 init_sh_desc(desc, HDR_SHARE_SERIAL);
315
316 /* Skip if already shared */
317 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
318 JUMP_COND_SHRD);
319 if (keys_fit_inline)
320 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
321 ctx->split_key_len, CLASS_2 |
322 KEY_DEST_MDHA_SPLIT | KEY_ENC);
323 else
324 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
325 KEY_DEST_MDHA_SPLIT | KEY_ENC);
326 set_jump_tgt_here(desc, key_jump_cmd);
327
328 /* Class 2 operation */
329 append_operation(desc, ctx->class2_alg_type |
330 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
331
332 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
333 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
334 ctx->authsize + tfm->ivsize);
335 /* assoclen = (assoclen + cryptlen) - cryptlen */
336 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
337 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
338
339 /* read assoc before reading payload */
340 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
341 KEY_VLF);
342
343 /* Prepare to read and write cryptlen bytes */
344 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
345 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
346
347 /*
348 * MOVE_LEN opcode is not available in all SEC HW revisions,
349 * thus need to do some magic, i.e. self-patch the descriptor
350 * buffer.
351 */
352 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
353 MOVE_DEST_MATH2 |
354 (0x6 << MOVE_LEN_SHIFT));
355 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
356 MOVE_DEST_DESCBUF |
357 MOVE_WAITCOMP |
358 (0x8 << MOVE_LEN_SHIFT));
359
360 /* Read and write cryptlen bytes */
361 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
362
363 /*
364 * Insert a NOP here, since we need at least 4 instructions between
365 * code patching the descriptor buffer and the location being patched.
366 */
367 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
368 set_jump_tgt_here(desc, jump_cmd);
369
370 set_move_tgt_here(desc, read_move_cmd);
371 set_move_tgt_here(desc, write_move_cmd);
372 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
373 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
374 MOVE_AUX_LS);
375 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
376
377 /* Load ICV */
378 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
379 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
380
381 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
382 desc_bytes(desc),
383 DMA_TO_DEVICE);
384 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
385 dev_err(jrdev, "unable to map shared descriptor\n");
386 return -ENOMEM;
387 }
388#ifdef DEBUG
389 print_hex_dump(KERN_ERR,
390 "aead null dec shdesc@"__stringify(__LINE__)": ",
391 DUMP_PREFIX_ADDRESS, 16, 4, desc,
392 desc_bytes(desc), 1);
393#endif
394
395 return 0;
396}
397
Yuan Kang1acebad2011-07-15 11:21:42 +0800398static int aead_set_sh_desc(struct crypto_aead *aead)
399{
400 struct aead_tfm *tfm = &aead->base.crt_aead;
401 struct caam_ctx *ctx = crypto_aead_ctx(aead);
402 struct device *jrdev = ctx->jrdev;
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800403 bool keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800404 u32 geniv, moveiv;
405 u32 *desc;
406
Horia Geantaae4a8252014-03-14 17:46:52 +0200407 if (!ctx->authsize)
Yuan Kang1acebad2011-07-15 11:21:42 +0800408 return 0;
409
Horia Geantaae4a8252014-03-14 17:46:52 +0200410 /* NULL encryption / decryption */
411 if (!ctx->enckeylen)
412 return aead_null_set_sh_desc(aead);
413
Yuan Kang1acebad2011-07-15 11:21:42 +0800414 /*
415 * Job Descriptor and Shared Descriptors
416 * must all fit into the 64-word Descriptor h/w Buffer
417 */
418 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
419 ctx->split_key_pad_len + ctx->enckeylen <=
420 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800421 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800422
423 /* aead_encrypt shared descriptor */
424 desc = ctx->sh_desc_enc;
425
426 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
427
428 /* Class 2 operation */
429 append_operation(desc, ctx->class2_alg_type |
430 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
431
432 /* cryptlen = seqoutlen - authsize */
433 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
434
435 /* assoclen + cryptlen = seqinlen - ivsize */
436 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
437
Horia Geanta4464a7d2014-03-14 17:46:49 +0200438 /* assoclen = (assoclen + cryptlen) - cryptlen */
Yuan Kang1acebad2011-07-15 11:21:42 +0800439 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
440
441 /* read assoc before reading payload */
442 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
443 KEY_VLF);
444 aead_append_ld_iv(desc, tfm->ivsize);
445
446 /* Class 1 operation */
447 append_operation(desc, ctx->class1_alg_type |
448 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
449
450 /* Read and write cryptlen bytes */
451 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
452 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
453 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
454
455 /* Write ICV */
456 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
457 LDST_SRCDST_BYTE_CONTEXT);
458
459 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
460 desc_bytes(desc),
461 DMA_TO_DEVICE);
462 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
463 dev_err(jrdev, "unable to map shared descriptor\n");
464 return -ENOMEM;
465 }
466#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300467 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800468 DUMP_PREFIX_ADDRESS, 16, 4, desc,
469 desc_bytes(desc), 1);
470#endif
471
472 /*
473 * Job Descriptor and Shared Descriptors
474 * must all fit into the 64-word Descriptor h/w Buffer
475 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500476 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800477 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
478 ctx->split_key_pad_len + ctx->enckeylen <=
479 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800480 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800481
Horia Geanta4464a7d2014-03-14 17:46:49 +0200482 /* aead_decrypt shared descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +0800483 desc = ctx->sh_desc_dec;
484
Horia Geanta4464a7d2014-03-14 17:46:49 +0200485 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
Yuan Kang1acebad2011-07-15 11:21:42 +0800486
487 /* Class 2 operation */
488 append_operation(desc, ctx->class2_alg_type |
489 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
490
Horia Geanta4464a7d2014-03-14 17:46:49 +0200491 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
Yuan Kang1acebad2011-07-15 11:21:42 +0800492 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Horia Geantaae4a8252014-03-14 17:46:52 +0200493 ctx->authsize + tfm->ivsize);
Yuan Kang1acebad2011-07-15 11:21:42 +0800494 /* assoclen = (assoclen + cryptlen) - cryptlen */
495 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
496 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
497
498 /* read assoc before reading payload */
499 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
500 KEY_VLF);
501
502 aead_append_ld_iv(desc, tfm->ivsize);
503
504 append_dec_op1(desc, ctx->class1_alg_type);
505
506 /* Read and write cryptlen bytes */
507 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
508 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
509 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
510
511 /* Load ICV */
512 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
513 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
Yuan Kang1acebad2011-07-15 11:21:42 +0800514
515 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
516 desc_bytes(desc),
517 DMA_TO_DEVICE);
518 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
519 dev_err(jrdev, "unable to map shared descriptor\n");
520 return -ENOMEM;
521 }
522#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300523 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800524 DUMP_PREFIX_ADDRESS, 16, 4, desc,
525 desc_bytes(desc), 1);
526#endif
527
528 /*
529 * Job Descriptor and Shared Descriptors
530 * must all fit into the 64-word Descriptor h/w Buffer
531 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500532 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800533 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
534 ctx->split_key_pad_len + ctx->enckeylen <=
535 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800536 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800537
538 /* aead_givencrypt shared descriptor */
539 desc = ctx->sh_desc_givenc;
540
541 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
542
543 /* Generate IV */
544 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
545 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
546 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
547 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
548 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
549 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
550 append_move(desc, MOVE_SRC_INFIFO |
551 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
552 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
553
	/* Copy generated IV from class 1 context to the output fifo */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
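/*
 * Illustrative layout of one aead_edesc allocation as carved out by
 * aead_edesc_alloc() below (sizes vary per request):
 *
 *	kmalloc'd buffer: [ struct aead_edesc ]
 *	                  [ hw_desc[]: job descriptor, desc_bytes ]
 *	                  [ sec4_sg[]: h/w link table, sec4_sg_bytes ]
 *
 * edesc->sec4_sg points into the tail of the same buffer and
 * edesc->sec4_sg_dma is that tail's DMA-mapped address.
 */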
831
Yuan Kangacdca312011-07-15 11:21:42 +0800832/*
833 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
834 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -0500835 * @src_chained: if source is chained
Yuan Kangacdca312011-07-15 11:21:42 +0800836 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -0500837 * @dst_chained: if destination is chained
Yuan Kangacdca312011-07-15 11:21:42 +0800838 * @iv_dma: dma address of iv for checking continuity and link table
839 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -0500840 * @sec4_sg_bytes: length of dma mapped sec4_sg space
841 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +0800842 * @hw_desc: the h/w job descriptor followed by any referenced link tables
843 */
844struct ablkcipher_edesc {
845 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -0500846 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +0800847 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -0500848 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +0800849 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -0500850 int sec4_sg_bytes;
851 dma_addr_t sec4_sg_dma;
852 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +0800853 u32 hw_desc[0];
854};
855
Yuan Kang1acebad2011-07-15 11:21:42 +0800856static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -0500857 struct scatterlist *dst, int src_nents,
858 bool src_chained, int dst_nents, bool dst_chained,
Yuan Kanga299c832012-06-22 19:48:46 -0500859 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
860 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +0800861{
Yuan Kang643b39b2012-06-22 19:48:49 -0500862 if (dst != src) {
863 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
864 src_chained);
865 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
866 dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800867 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -0500868 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
869 DMA_BIDIRECTIONAL, src_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800870 }
871
Yuan Kang1acebad2011-07-15 11:21:42 +0800872 if (iv_dma)
873 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -0500874 if (sec4_sg_bytes)
875 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800876 DMA_TO_DEVICE);
877}
878
Yuan Kang1acebad2011-07-15 11:21:42 +0800879static void aead_unmap(struct device *dev,
880 struct aead_edesc *edesc,
881 struct aead_request *req)
882{
883 struct crypto_aead *aead = crypto_aead_reqtfm(req);
884 int ivsize = crypto_aead_ivsize(aead);
885
Yuan Kang643b39b2012-06-22 19:48:49 -0500886 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
887 DMA_TO_DEVICE, edesc->assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +0800888
889 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -0500890 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
891 edesc->dst_chained, edesc->iv_dma, ivsize,
892 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kang1acebad2011-07-15 11:21:42 +0800893}
894
Yuan Kangacdca312011-07-15 11:21:42 +0800895static void ablkcipher_unmap(struct device *dev,
896 struct ablkcipher_edesc *edesc,
897 struct ablkcipher_request *req)
898{
899 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
900 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
901
902 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -0500903 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
904 edesc->dst_chained, edesc->iv_dma, ivsize,
905 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +0800906}
907
Yuan Kang0e479302011-07-15 11:21:41 +0800908static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800909 void *context)
910{
Yuan Kang0e479302011-07-15 11:21:41 +0800911 struct aead_request *req = context;
912 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800913#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +0800914 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800915 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +0800916 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800917
918 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
919#endif
Yuan Kang1acebad2011-07-15 11:21:42 +0800920
Yuan Kang0e479302011-07-15 11:21:41 +0800921 edesc = (struct aead_edesc *)((char *)desc -
922 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +0800923
Marek Vasutfa9659c2014-04-24 20:05:12 +0200924 if (err)
925 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800926
Yuan Kang0e479302011-07-15 11:21:41 +0800927 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800928
929#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300930 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +0800931 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
932 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +0300933 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +0800934 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800935 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +0300936 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +0800937 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
938 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +0800939 ctx->authsize + 4, 1);
940#endif
941
942 kfree(edesc);
943
Yuan Kang0e479302011-07-15 11:21:41 +0800944 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800945}
946
Yuan Kang0e479302011-07-15 11:21:41 +0800947static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800948 void *context)
949{
Yuan Kang0e479302011-07-15 11:21:41 +0800950 struct aead_request *req = context;
951 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800952#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +0800953 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800954 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +0800955 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800956
957 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
958#endif
Yuan Kang1acebad2011-07-15 11:21:42 +0800959
Yuan Kang0e479302011-07-15 11:21:41 +0800960 edesc = (struct aead_edesc *)((char *)desc -
961 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +0800962
Yuan Kang1acebad2011-07-15 11:21:42 +0800963#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300964 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800965 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
966 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +0300967 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800968 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +0200969 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad2011-07-15 11:21:42 +0800970#endif
971
Marek Vasutfa9659c2014-04-24 20:05:12 +0200972 if (err)
973 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800974
Yuan Kang0e479302011-07-15 11:21:41 +0800975 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800976
977 /*
978 * verify hw auth check passed else return -EBADMSG
979 */
980 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
981 err = -EBADMSG;
982
983#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300984 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +0800985 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +0800986 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
987 sizeof(struct iphdr) + req->assoclen +
988 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +0800989 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -0500990 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +0800991 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +0300992 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +0800993 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
994 sg->length + ctx->authsize + 16, 1);
995 }
996#endif
Yuan Kang1acebad2011-07-15 11:21:42 +0800997
Kim Phillips8e8ec592011-03-13 16:54:26 +0800998 kfree(edesc);
999
Yuan Kang0e479302011-07-15 11:21:41 +08001000 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001001}
1002
Yuan Kangacdca312011-07-15 11:21:42 +08001003static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1004 void *context)
1005{
1006 struct ablkcipher_request *req = context;
1007 struct ablkcipher_edesc *edesc;
1008#ifdef DEBUG
1009 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1010 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1011
1012 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1013#endif
1014
1015 edesc = (struct ablkcipher_edesc *)((char *)desc -
1016 offsetof(struct ablkcipher_edesc, hw_desc));
1017
Marek Vasutfa9659c2014-04-24 20:05:12 +02001018 if (err)
1019 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001020
1021#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001022 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001023 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1024 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001025 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001026 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1027 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1028#endif
1029
1030 ablkcipher_unmap(jrdev, edesc, req);
1031 kfree(edesc);
1032
1033 ablkcipher_request_complete(req, err);
1034}
1035
1036static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1037 void *context)
1038{
1039 struct ablkcipher_request *req = context;
1040 struct ablkcipher_edesc *edesc;
1041#ifdef DEBUG
1042 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1043 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1044
1045 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1046#endif
1047
1048 edesc = (struct ablkcipher_edesc *)((char *)desc -
1049 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02001050 if (err)
1051 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001052
1053#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001054 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001055 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1056 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001057 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001058 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1059 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1060#endif
1061
1062 ablkcipher_unmap(jrdev, edesc, req);
1063 kfree(edesc);
1064
1065 ablkcipher_request_complete(req, err);
1066}
1067
Kim Phillips8e8ec592011-03-13 16:54:26 +08001068/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001069 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08001070 */
Yuan Kang1acebad2011-07-15 11:21:42 +08001071static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1072 struct aead_edesc *edesc,
1073 struct aead_request *req,
1074 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001075{
Yuan Kang0e479302011-07-15 11:21:41 +08001076 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001077 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001078 int ivsize = crypto_aead_ivsize(aead);
1079 int authsize = ctx->authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +08001080 u32 *desc = edesc->hw_desc;
1081 u32 out_options = 0, in_options;
1082 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001083 int len, sec4_sg_index = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001084
Yuan Kang1acebad2011-07-15 11:21:42 +08001085#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08001086 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08001087 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03001088 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001089 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1090 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001091 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001092 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001093 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001094 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001095 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad2011-07-15 11:21:42 +08001096 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001097 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001098 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1099 desc_bytes(sh_desc), 1);
1100#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001101
1102 len = desc_len(sh_desc);
1103 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1104
1105 if (all_contig) {
1106 src_dma = sg_dma_address(req->assoc);
1107 in_options = 0;
1108 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001109 src_dma = edesc->sec4_sg_dma;
1110 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1111 (edesc->src_nents ? : 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08001112 in_options = LDST_SGF;
1113 }
Horia Geantabbf9c892013-11-28 15:11:16 +02001114
1115 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1116 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001117
Yuan Kang1acebad2011-07-15 11:21:42 +08001118 if (likely(req->src == req->dst)) {
1119 if (all_contig) {
1120 dst_dma = sg_dma_address(req->src);
1121 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001122 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08001123 ((edesc->assoc_nents ? : 1) + 1);
1124 out_options = LDST_SGF;
1125 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001126 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001127 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08001128 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001129 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001130 dst_dma = edesc->sec4_sg_dma +
1131 sec4_sg_index *
1132 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001133 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001134 }
1135 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001136 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02001137 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
1138 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001139 else
Yuan Kang1acebad2011-07-15 11:21:42 +08001140 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1141 out_options);
1142}
1143
1144/*
1145 * Fill in aead givencrypt job descriptor
1146 */
1147static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1148 struct aead_edesc *edesc,
1149 struct aead_request *req,
1150 int contig)
1151{
1152 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1153 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1154 int ivsize = crypto_aead_ivsize(aead);
1155 int authsize = ctx->authsize;
1156 u32 *desc = edesc->hw_desc;
1157 u32 out_options = 0, in_options;
1158 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001159 int len, sec4_sg_index = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001160
1161#ifdef DEBUG
Yuan Kang1acebad2011-07-15 11:21:42 +08001162 debug("assoclen %d cryptlen %d authsize %d\n",
1163 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03001164 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001165 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1166 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001167 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001168 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001169 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001170 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1171 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001172 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001173 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1174 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001175#endif
1176
Yuan Kang1acebad2011-07-15 11:21:42 +08001177 len = desc_len(sh_desc);
1178 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1179
1180 if (contig & GIV_SRC_CONTIG) {
1181 src_dma = sg_dma_address(req->assoc);
1182 in_options = 0;
1183 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001184 src_dma = edesc->sec4_sg_dma;
1185 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001186 in_options = LDST_SGF;
1187 }
Horia Geantabbf9c892013-11-28 15:11:16 +02001188 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1189 in_options);
Yuan Kang1acebad2011-07-15 11:21:42 +08001190
1191 if (contig & GIV_DST_CONTIG) {
1192 dst_dma = edesc->iv_dma;
1193 } else {
1194 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001195 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08001196 edesc->assoc_nents;
1197 out_options = LDST_SGF;
1198 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001199 dst_dma = edesc->sec4_sg_dma +
1200 sec4_sg_index *
1201 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001202 out_options = LDST_SGF;
1203 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001204 }
1205
Horia Geantabbf9c892013-11-28 15:11:16 +02001206 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1207 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001208}
1209
1210/*
Yuan Kangacdca312011-07-15 11:21:42 +08001211 * Fill in ablkcipher job descriptor
1212 */
1213static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1214 struct ablkcipher_edesc *edesc,
1215 struct ablkcipher_request *req,
1216 bool iv_contig)
1217{
1218 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1219 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1220 u32 *desc = edesc->hw_desc;
1221 u32 out_options = 0, in_options;
1222 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001223 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08001224
1225#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001226 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001227 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1228 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001229 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001230 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1231 edesc->src_nents ? 100 : req->nbytes, 1);
1232#endif
1233
1234 len = desc_len(sh_desc);
1235 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1236
1237 if (iv_contig) {
1238 src_dma = edesc->iv_dma;
1239 in_options = 0;
1240 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001241 src_dma = edesc->sec4_sg_dma;
1242 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08001243 in_options = LDST_SGF;
1244 }
1245 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1246
1247 if (likely(req->src == req->dst)) {
1248 if (!edesc->src_nents && iv_contig) {
1249 dst_dma = sg_dma_address(req->src);
1250 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001251 dst_dma = edesc->sec4_sg_dma +
1252 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001253 out_options = LDST_SGF;
1254 }
1255 } else {
1256 if (!edesc->dst_nents) {
1257 dst_dma = sg_dma_address(req->dst);
1258 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001259 dst_dma = edesc->sec4_sg_dma +
1260 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001261 out_options = LDST_SGF;
1262 }
1263 }
1264 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1265}
1266
1267/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001268 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08001269 */
Yuan Kang0e479302011-07-15 11:21:41 +08001270static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
Horia Geantabbf9c892013-11-28 15:11:16 +02001271 int desc_bytes, bool *all_contig_ptr,
1272 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001273{
Yuan Kang0e479302011-07-15 11:21:41 +08001274 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001275 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1276 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001277 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1278 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1279 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08001280 struct aead_edesc *edesc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001281 dma_addr_t iv_dma = 0;
1282 int sgc;
1283 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05001284 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad2011-07-15 11:21:42 +08001285 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05001286 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02001287 unsigned int authsize = ctx->authsize;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001288
Yuan Kang643b39b2012-06-22 19:48:49 -05001289 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001290
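/*
 * The destination must also hold the ICV on encrypt (+authsize) and loses
 * it on decrypt (-authsize); for in-place operation only the encrypt case
 * needs the extra room.
 */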
Horia Geantabbf9c892013-11-28 15:11:16 +02001291 if (unlikely(req->dst != req->src)) {
1292 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1293 dst_nents = sg_count(req->dst,
1294 req->cryptlen +
1295 (encrypt ? authsize : (-authsize)),
1296 &dst_chained);
1297 } else {
1298 src_nents = sg_count(req->src,
1299 req->cryptlen +
1300 (encrypt ? authsize : 0),
1301 &src_chained);
1302 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001303
Yuan Kang643b39b2012-06-22 19:48:49 -05001304 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03001305 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001306 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001307 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1308 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001309 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001310 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1311 DMA_TO_DEVICE, src_chained);
1312 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1313 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001314 }
1315
Yuan Kang1acebad2011-07-15 11:21:42 +08001316 /* Check if data are contiguous */
1317 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1318 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1319 iv_dma || src_nents || iv_dma + ivsize !=
1320 sg_dma_address(req->src)) {
1321 all_contig = false;
1322 assoc_nents = assoc_nents ? : 1;
1323 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001324 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001325 }
Yuan Kanga299c832012-06-22 19:48:46 -05001326 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001327
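/* one struct sec4_sg_entry is needed per segment in the link table */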
Yuan Kanga299c832012-06-22 19:48:46 -05001328 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001329
1330 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08001331 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001332 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001333 if (!edesc) {
1334 dev_err(jrdev, "could not allocate extended descriptor\n");
1335 return ERR_PTR(-ENOMEM);
1336 }
1337
1338 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001339 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001340 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001341 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001342 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001343 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001344 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001345 edesc->sec4_sg_bytes = sec4_sg_bytes;
1346 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1347 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08001348 *all_contig_ptr = all_contig;
1349
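/*
 * Build the link table: [assoc segments][IV][src segments] when the input
 * is not contiguous, followed by dst segments when the destination is a
 * separate scatterlist.
 */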
Yuan Kanga299c832012-06-22 19:48:46 -05001350 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001351 if (!all_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05001352 sg_to_sec4_sg(req->assoc,
1353 (assoc_nents ? : 1),
1354 edesc->sec4_sg +
1355 sec4_sg_index, 0);
1356 sec4_sg_index += assoc_nents ? : 1;
1357 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001358 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001359 sec4_sg_index += 1;
1360 sg_to_sec4_sg_last(req->src,
1361 (src_nents ? : 1),
1362 edesc->sec4_sg +
1363 sec4_sg_index, 0);
1364 sec4_sg_index += src_nents ? : 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08001365 }
1366 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05001367 sg_to_sec4_sg_last(req->dst, dst_nents,
1368 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001369 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05301370 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1371 sec4_sg_bytes, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001372
1373 return edesc;
1374}
1375
Yuan Kang0e479302011-07-15 11:21:41 +08001376static int aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001377{
Yuan Kang0e479302011-07-15 11:21:41 +08001378 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001379 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001380 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1381 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001382 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001383 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001384 int ret = 0;
1385
Kim Phillips8e8ec592011-03-13 16:54:26 +08001386 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001387 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02001388 CAAM_CMD_SZ, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001389 if (IS_ERR(edesc))
1390 return PTR_ERR(edesc);
1391
Yuan Kang1acebad2011-07-15 11:21:42 +08001392 /* Create and submit job descriptor */
1393 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1394 all_contig, true);
1395#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001396 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001397 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1398 desc_bytes(edesc->hw_desc), 1);
1399#endif
1400
Kim Phillips8e8ec592011-03-13 16:54:26 +08001401 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001402 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1403 if (!ret) {
1404 ret = -EINPROGRESS;
1405 } else {
1406 aead_unmap(jrdev, edesc, req);
1407 kfree(edesc);
1408 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001409
Yuan Kang1acebad2011-07-15 11:21:42 +08001410 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001411}
1412
Yuan Kang0e479302011-07-15 11:21:41 +08001413static int aead_decrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001414{
Yuan Kang1acebad2011-07-15 11:21:42 +08001415 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08001416 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08001417 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1418 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001419 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08001420 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001421 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08001422
1423 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001424 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02001425 CAAM_CMD_SZ, &all_contig, false);
Yuan Kang0e479302011-07-15 11:21:41 +08001426 if (IS_ERR(edesc))
1427 return PTR_ERR(edesc);
1428
Yuan Kang1acebad2011-07-15 11:21:42 +08001429#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001430 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001431 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1432 req->cryptlen, 1);
1433#endif
1434
1435 /* Create and submit job descriptor */
1436 init_aead_job(ctx->sh_desc_dec,
1437 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1438#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001439 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001440 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1441 desc_bytes(edesc->hw_desc), 1);
1442#endif
1443
Yuan Kang0e479302011-07-15 11:21:41 +08001444 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001445 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1446 if (!ret) {
1447 ret = -EINPROGRESS;
1448 } else {
1449 aead_unmap(jrdev, edesc, req);
1450 kfree(edesc);
1451 }
Yuan Kang0e479302011-07-15 11:21:41 +08001452
Yuan Kang1acebad2011-07-15 11:21:42 +08001453 return ret;
1454}
Yuan Kang0e479302011-07-15 11:21:41 +08001455
Yuan Kang1acebad2011-07-15 11:21:42 +08001456/*
1457 * allocate and map the aead extended descriptor for aead givencrypt
1458 */
1459static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1460 *greq, int desc_bytes,
1461 u32 *contig_ptr)
1462{
1463 struct aead_request *req = &greq->areq;
1464 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1465 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1466 struct device *jrdev = ctx->jrdev;
1467 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1468 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1469 int assoc_nents, src_nents, dst_nents = 0;
1470 struct aead_edesc *edesc;
1471 dma_addr_t iv_dma = 0;
1472 int sgc;
1473 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1474 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05001475 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05001476 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Yuan Kang0e479302011-07-15 11:21:41 +08001477
Yuan Kang643b39b2012-06-22 19:48:49 -05001478 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1479 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08001480
Yuan Kang1acebad2011-07-15 11:21:42 +08001481 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02001482 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1483 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001484
Yuan Kang643b39b2012-06-22 19:48:49 -05001485 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03001486 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001487 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001488 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1489 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001490 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001491 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1492 DMA_TO_DEVICE, src_chained);
1493 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1494 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001495 }
1496
1497 /* Check if data are contiguous */
1498 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1499 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1500 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1501 contig &= ~GIV_SRC_CONTIG;
1502 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1503 contig &= ~GIV_DST_CONTIG;
Kim Phillips2af8f4a2012-09-07 04:17:03 +08001504 if (unlikely(req->src != req->dst)) {
1505 dst_nents = dst_nents ? : 1;
1506 sec4_sg_len += 1;
1507 }
Yuan Kang1acebad2011-07-15 11:21:42 +08001508 if (!(contig & GIV_SRC_CONTIG)) {
1509 assoc_nents = assoc_nents ? : 1;
1510 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001511 sec4_sg_len += assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001512 if (likely(req->src == req->dst))
1513 contig &= ~GIV_DST_CONTIG;
1514 }
Yuan Kanga299c832012-06-22 19:48:46 -05001515 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001516
Yuan Kanga299c832012-06-22 19:48:46 -05001517 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001518
1519 /* allocate space for base edesc and hw desc commands, link tables */
1520 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001521 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08001522 if (!edesc) {
1523 dev_err(jrdev, "could not allocate extended descriptor\n");
1524 return ERR_PTR(-ENOMEM);
1525 }
1526
1527 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001528 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001529 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001530 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001531 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001532 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001533 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001534 edesc->sec4_sg_bytes = sec4_sg_bytes;
1535 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1536 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08001537 *contig_ptr = contig;
1538
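/*
 * Link table for givencrypt: [assoc segments][IV][src segments] when the
 * source side is not contiguous; a separate, non-contiguous destination
 * gets its own IV entry ahead of the dst segments so the generated IV is
 * written to the output as well.
 */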
Yuan Kanga299c832012-06-22 19:48:46 -05001539 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001540 if (!(contig & GIV_SRC_CONTIG)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001541 sg_to_sec4_sg(req->assoc, assoc_nents,
1542 edesc->sec4_sg +
1543 sec4_sg_index, 0);
1544 sec4_sg_index += assoc_nents;
1545 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001546 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001547 sec4_sg_index += 1;
1548 sg_to_sec4_sg_last(req->src, src_nents,
1549 edesc->sec4_sg +
1550 sec4_sg_index, 0);
1551 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001552 }
1553 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05001554 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001555 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001556 sec4_sg_index += 1;
1557 sg_to_sec4_sg_last(req->dst, dst_nents,
1558 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001559 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05301560 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1561 sec4_sg_bytes, DMA_TO_DEVICE);
Yuan Kang1acebad2011-07-15 11:21:42 +08001562
1563 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08001564}
1565
1566static int aead_givencrypt(struct aead_givcrypt_request *areq)
1567{
1568 struct aead_request *req = &areq->areq;
1569 struct aead_edesc *edesc;
1570 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001571 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1572 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001573 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001574 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001575 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001576
Kim Phillips8e8ec592011-03-13 16:54:26 +08001577 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001578 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1579 CAAM_CMD_SZ, &contig);
1580
Kim Phillips8e8ec592011-03-13 16:54:26 +08001581 if (IS_ERR(edesc))
1582 return PTR_ERR(edesc);
1583
Yuan Kang1acebad2011-07-15 11:21:42 +08001584#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001585 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001586 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1587 req->cryptlen, 1);
1588#endif
1589
1590 /* Create and submit job descriptor */
1591 init_aead_giv_job(ctx->sh_desc_givenc,
1592 ctx->sh_desc_givenc_dma, edesc, req, contig);
1593#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001594 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001595 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1596 desc_bytes(edesc->hw_desc), 1);
1597#endif
1598
Kim Phillips8e8ec592011-03-13 16:54:26 +08001599 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001600 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1601 if (!ret) {
1602 ret = -EINPROGRESS;
1603 } else {
1604 aead_unmap(jrdev, edesc, req);
1605 kfree(edesc);
1606 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001607
Yuan Kang1acebad2011-07-15 11:21:42 +08001608 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001609}
1610
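/* cipher_null carries no IV to generate, so givencrypt is a plain encrypt */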
Horia Geantaae4a8252014-03-14 17:46:52 +02001611static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1612{
1613 return aead_encrypt(&areq->areq);
1614}
1615
Yuan Kangacdca312011-07-15 11:21:42 +08001616/*
1617 * allocate and map the ablkcipher extended descriptor for ablkcipher
1618 */
1619static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1620 *req, int desc_bytes,
1621 bool *iv_contig_out)
1622{
1623 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1624 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1625 struct device *jrdev = ctx->jrdev;
1626 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1627 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1628 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05001629 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08001630 struct ablkcipher_edesc *edesc;
1631 dma_addr_t iv_dma = 0;
1632 bool iv_contig = false;
1633 int sgc;
1634 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05001635 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05001636 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08001637
Yuan Kang643b39b2012-06-22 19:48:49 -05001638 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001639
Yuan Kang643b39b2012-06-22 19:48:49 -05001640 if (req->dst != req->src)
1641 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001642
1643 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001644 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1645 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001646 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001647 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1648 DMA_TO_DEVICE, src_chained);
1649 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1650 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001651 }
1652
1653 /*
1654 * Check if iv can be contiguous with source and destination.
1655 * If so, include it. If not, create scatterlist.
1656 */
1657 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1658 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1659 iv_contig = true;
1660 else
1661 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001662 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1663 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001664
1665 /* allocate space for base edesc and hw desc commands, link tables */
1666 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001667 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08001668 if (!edesc) {
1669 dev_err(jrdev, "could not allocate extended descriptor\n");
1670 return ERR_PTR(-ENOMEM);
1671 }
1672
1673 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001674 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001675 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001676 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05001677 edesc->sec4_sg_bytes = sec4_sg_bytes;
1678 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1679 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08001680
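/*
 * Table layout: [IV][src segments] when the IV is not contiguous with the
 * source, then dst segments when the destination is a separate scatterlist.
 */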
Yuan Kanga299c832012-06-22 19:48:46 -05001681 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08001682 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05001683 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1684 sg_to_sec4_sg_last(req->src, src_nents,
1685 edesc->sec4_sg + 1, 0);
1686 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08001687 }
1688
Yuan Kang643b39b2012-06-22 19:48:49 -05001689 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05001690 sg_to_sec4_sg_last(req->dst, dst_nents,
1691 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08001692 }
1693
Yuan Kanga299c832012-06-22 19:48:46 -05001694 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1695 sec4_sg_bytes, DMA_TO_DEVICE);
Yuan Kangacdca312011-07-15 11:21:42 +08001696 edesc->iv_dma = iv_dma;
1697
1698#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001699 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05001700 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1701 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08001702#endif
1703
1704 *iv_contig_out = iv_contig;
1705 return edesc;
1706}
1707
1708static int ablkcipher_encrypt(struct ablkcipher_request *req)
1709{
1710 struct ablkcipher_edesc *edesc;
1711 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1712 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1713 struct device *jrdev = ctx->jrdev;
1714 bool iv_contig;
1715 u32 *desc;
1716 int ret = 0;
1717
1718 /* allocate extended descriptor */
1719 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1720 CAAM_CMD_SZ, &iv_contig);
1721 if (IS_ERR(edesc))
1722 return PTR_ERR(edesc);
1723
1724 /* Create and submit job descriptor */
1725 init_ablkcipher_job(ctx->sh_desc_enc,
1726 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1727#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001728 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001729 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1730 desc_bytes(edesc->hw_desc), 1);
1731#endif
1732 desc = edesc->hw_desc;
1733 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1734
1735 if (!ret) {
1736 ret = -EINPROGRESS;
1737 } else {
1738 ablkcipher_unmap(jrdev, edesc, req);
1739 kfree(edesc);
1740 }
1741
1742 return ret;
1743}
1744
1745static int ablkcipher_decrypt(struct ablkcipher_request *req)
1746{
1747 struct ablkcipher_edesc *edesc;
1748 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1749 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1750 struct device *jrdev = ctx->jrdev;
1751 bool iv_contig;
1752 u32 *desc;
1753 int ret = 0;
1754
1755 /* allocate extended descriptor */
1756 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1757 CAAM_CMD_SZ, &iv_contig);
1758 if (IS_ERR(edesc))
1759 return PTR_ERR(edesc);
1760
1761 /* Create and submit job descriptor */
1762 init_ablkcipher_job(ctx->sh_desc_dec,
1763 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1764 desc = edesc->hw_desc;
1765#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001766 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001767 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1768 desc_bytes(edesc->hw_desc), 1);
1769#endif
1770
1771 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1772 if (!ret) {
1773 ret = -EINPROGRESS;
1774 } else {
1775 ablkcipher_unmap(jrdev, edesc, req);
1776 kfree(edesc);
1777 }
1778
1779 return ret;
1780}
1781
Yuan Kang885e9e22011-07-15 11:21:41 +08001782#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08001783#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08001784struct caam_alg_template {
1785 char name[CRYPTO_MAX_ALG_NAME];
1786 char driver_name[CRYPTO_MAX_ALG_NAME];
1787 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08001788 u32 type;
1789 union {
1790 struct ablkcipher_alg ablkcipher;
1791 struct aead_alg aead;
1792 struct blkcipher_alg blkcipher;
1793 struct cipher_alg cipher;
1794 struct compress_alg compress;
1795 struct rng_alg rng;
1796 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001797 u32 class1_alg_type;
1798 u32 class2_alg_type;
1799 u32 alg_op;
1800};
1801
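/*
 * Each template below is wrapped into a struct crypto_alg by caam_alg_alloc()
 * and registered with the crypto API at module init. A kernel consumer would
 * then obtain the CAAM-backed transform through the usual crypto API calls;
 * an illustrative sketch (not part of this driver, key/buffer handling and
 * error paths omitted):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *		...
 *		crypto_free_aead(tfm);
 *	}
 */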
1802static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02001803 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08001804 {
Horia Geantaae4a8252014-03-14 17:46:52 +02001805 .name = "authenc(hmac(md5),ecb(cipher_null))",
1806 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
1807 .blocksize = NULL_BLOCK_SIZE,
1808 .type = CRYPTO_ALG_TYPE_AEAD,
1809 .template_aead = {
1810 .setkey = aead_setkey,
1811 .setauthsize = aead_setauthsize,
1812 .encrypt = aead_encrypt,
1813 .decrypt = aead_decrypt,
1814 .givencrypt = aead_null_givencrypt,
1815 .geniv = "<built-in>",
1816 .ivsize = NULL_IV_SIZE,
1817 .maxauthsize = MD5_DIGEST_SIZE,
1818 },
1819 .class1_alg_type = 0,
1820 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1821 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1822 },
1823 {
1824 .name = "authenc(hmac(sha1),ecb(cipher_null))",
1825 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
1826 .blocksize = NULL_BLOCK_SIZE,
1827 .type = CRYPTO_ALG_TYPE_AEAD,
1828 .template_aead = {
1829 .setkey = aead_setkey,
1830 .setauthsize = aead_setauthsize,
1831 .encrypt = aead_encrypt,
1832 .decrypt = aead_decrypt,
1833 .givencrypt = aead_null_givencrypt,
1834 .geniv = "<built-in>",
1835 .ivsize = NULL_IV_SIZE,
1836 .maxauthsize = SHA1_DIGEST_SIZE,
1837 },
1838 .class1_alg_type = 0,
1839 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1840 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1841 },
1842 {
1843 .name = "authenc(hmac(sha224),ecb(cipher_null))",
1844 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
1845 .blocksize = NULL_BLOCK_SIZE,
1846 .type = CRYPTO_ALG_TYPE_AEAD,
1847 .template_aead = {
1848 .setkey = aead_setkey,
1849 .setauthsize = aead_setauthsize,
1850 .encrypt = aead_encrypt,
1851 .decrypt = aead_decrypt,
1852 .givencrypt = aead_null_givencrypt,
1853 .geniv = "<built-in>",
1854 .ivsize = NULL_IV_SIZE,
1855 .maxauthsize = SHA224_DIGEST_SIZE,
1856 },
1857 .class1_alg_type = 0,
1858 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1859 OP_ALG_AAI_HMAC_PRECOMP,
1860 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1861 },
1862 {
1863 .name = "authenc(hmac(sha256),ecb(cipher_null))",
1864 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
1865 .blocksize = NULL_BLOCK_SIZE,
1866 .type = CRYPTO_ALG_TYPE_AEAD,
1867 .template_aead = {
1868 .setkey = aead_setkey,
1869 .setauthsize = aead_setauthsize,
1870 .encrypt = aead_encrypt,
1871 .decrypt = aead_decrypt,
1872 .givencrypt = aead_null_givencrypt,
1873 .geniv = "<built-in>",
1874 .ivsize = NULL_IV_SIZE,
1875 .maxauthsize = SHA256_DIGEST_SIZE,
1876 },
1877 .class1_alg_type = 0,
1878 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1879 OP_ALG_AAI_HMAC_PRECOMP,
1880 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1881 },
1882 {
1883 .name = "authenc(hmac(sha384),ecb(cipher_null))",
1884 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
1885 .blocksize = NULL_BLOCK_SIZE,
1886 .type = CRYPTO_ALG_TYPE_AEAD,
1887 .template_aead = {
1888 .setkey = aead_setkey,
1889 .setauthsize = aead_setauthsize,
1890 .encrypt = aead_encrypt,
1891 .decrypt = aead_decrypt,
1892 .givencrypt = aead_null_givencrypt,
1893 .geniv = "<built-in>",
1894 .ivsize = NULL_IV_SIZE,
1895 .maxauthsize = SHA384_DIGEST_SIZE,
1896 },
1897 .class1_alg_type = 0,
1898 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1899 OP_ALG_AAI_HMAC_PRECOMP,
1900 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1901 },
1902 {
1903 .name = "authenc(hmac(sha512),ecb(cipher_null))",
1904 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
1905 .blocksize = NULL_BLOCK_SIZE,
1906 .type = CRYPTO_ALG_TYPE_AEAD,
1907 .template_aead = {
1908 .setkey = aead_setkey,
1909 .setauthsize = aead_setauthsize,
1910 .encrypt = aead_encrypt,
1911 .decrypt = aead_decrypt,
1912 .givencrypt = aead_null_givencrypt,
1913 .geniv = "<built-in>",
1914 .ivsize = NULL_IV_SIZE,
1915 .maxauthsize = SHA512_DIGEST_SIZE,
1916 },
1917 .class1_alg_type = 0,
1918 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1919 OP_ALG_AAI_HMAC_PRECOMP,
1920 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1921 },
1922 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08001923 .name = "authenc(hmac(md5),cbc(aes))",
1924 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1925 .blocksize = AES_BLOCK_SIZE,
1926 .type = CRYPTO_ALG_TYPE_AEAD,
1927 .template_aead = {
1928 .setkey = aead_setkey,
1929 .setauthsize = aead_setauthsize,
1930 .encrypt = aead_encrypt,
1931 .decrypt = aead_decrypt,
1932 .givencrypt = aead_givencrypt,
1933 .geniv = "<built-in>",
1934 .ivsize = AES_BLOCK_SIZE,
1935 .maxauthsize = MD5_DIGEST_SIZE,
1936 },
1937 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1938 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1939 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1940 },
1941 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001942 .name = "authenc(hmac(sha1),cbc(aes))",
1943 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1944 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001945 .type = CRYPTO_ALG_TYPE_AEAD,
1946 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001947 .setkey = aead_setkey,
1948 .setauthsize = aead_setauthsize,
1949 .encrypt = aead_encrypt,
1950 .decrypt = aead_decrypt,
1951 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001952 .geniv = "<built-in>",
1953 .ivsize = AES_BLOCK_SIZE,
1954 .maxauthsize = SHA1_DIGEST_SIZE,
1955 },
1956 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1957 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1958 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1959 },
1960 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001961 .name = "authenc(hmac(sha224),cbc(aes))",
1962 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1963 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05301964 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06001965 .template_aead = {
1966 .setkey = aead_setkey,
1967 .setauthsize = aead_setauthsize,
1968 .encrypt = aead_encrypt,
1969 .decrypt = aead_decrypt,
1970 .givencrypt = aead_givencrypt,
1971 .geniv = "<built-in>",
1972 .ivsize = AES_BLOCK_SIZE,
1973 .maxauthsize = SHA224_DIGEST_SIZE,
1974 },
1975 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1976 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1977 OP_ALG_AAI_HMAC_PRECOMP,
1978 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1979 },
1980 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001981 .name = "authenc(hmac(sha256),cbc(aes))",
1982 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1983 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08001984 .type = CRYPTO_ALG_TYPE_AEAD,
1985 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08001986 .setkey = aead_setkey,
1987 .setauthsize = aead_setauthsize,
1988 .encrypt = aead_encrypt,
1989 .decrypt = aead_decrypt,
1990 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001991 .geniv = "<built-in>",
1992 .ivsize = AES_BLOCK_SIZE,
1993 .maxauthsize = SHA256_DIGEST_SIZE,
1994 },
1995 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1996 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1997 OP_ALG_AAI_HMAC_PRECOMP,
1998 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1999 },
2000 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002001 .name = "authenc(hmac(sha384),cbc(aes))",
2002 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2003 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302004 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002005 .template_aead = {
2006 .setkey = aead_setkey,
2007 .setauthsize = aead_setauthsize,
2008 .encrypt = aead_encrypt,
2009 .decrypt = aead_decrypt,
2010 .givencrypt = aead_givencrypt,
2011 .geniv = "<built-in>",
2012 .ivsize = AES_BLOCK_SIZE,
2013 .maxauthsize = SHA384_DIGEST_SIZE,
2014 },
2015 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2016 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2017 OP_ALG_AAI_HMAC_PRECOMP,
2018 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2019 },
2020
2021 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002022 .name = "authenc(hmac(sha512),cbc(aes))",
2023 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2024 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002025 .type = CRYPTO_ALG_TYPE_AEAD,
2026 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002027 .setkey = aead_setkey,
2028 .setauthsize = aead_setauthsize,
2029 .encrypt = aead_encrypt,
2030 .decrypt = aead_decrypt,
2031 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002032 .geniv = "<built-in>",
2033 .ivsize = AES_BLOCK_SIZE,
2034 .maxauthsize = SHA512_DIGEST_SIZE,
2035 },
2036 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2037 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2038 OP_ALG_AAI_HMAC_PRECOMP,
2039 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2040 },
2041 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002042 .name = "authenc(hmac(md5),cbc(des3_ede))",
2043 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2044 .blocksize = DES3_EDE_BLOCK_SIZE,
2045 .type = CRYPTO_ALG_TYPE_AEAD,
2046 .template_aead = {
2047 .setkey = aead_setkey,
2048 .setauthsize = aead_setauthsize,
2049 .encrypt = aead_encrypt,
2050 .decrypt = aead_decrypt,
2051 .givencrypt = aead_givencrypt,
2052 .geniv = "<built-in>",
2053 .ivsize = DES3_EDE_BLOCK_SIZE,
2054 .maxauthsize = MD5_DIGEST_SIZE,
2055 },
2056 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2057 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2058 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2059 },
2060 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002061 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2062 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2063 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002064 .type = CRYPTO_ALG_TYPE_AEAD,
2065 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002066 .setkey = aead_setkey,
2067 .setauthsize = aead_setauthsize,
2068 .encrypt = aead_encrypt,
2069 .decrypt = aead_decrypt,
2070 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002071 .geniv = "<built-in>",
2072 .ivsize = DES3_EDE_BLOCK_SIZE,
2073 .maxauthsize = SHA1_DIGEST_SIZE,
2074 },
2075 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2076 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2077 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2078 },
2079 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002080 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2081 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2082 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302083 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002084 .template_aead = {
2085 .setkey = aead_setkey,
2086 .setauthsize = aead_setauthsize,
2087 .encrypt = aead_encrypt,
2088 .decrypt = aead_decrypt,
2089 .givencrypt = aead_givencrypt,
2090 .geniv = "<built-in>",
2091 .ivsize = DES3_EDE_BLOCK_SIZE,
2092 .maxauthsize = SHA224_DIGEST_SIZE,
2093 },
2094 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2095 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2096 OP_ALG_AAI_HMAC_PRECOMP,
2097 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2098 },
2099 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002100 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2101 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2102 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002103 .type = CRYPTO_ALG_TYPE_AEAD,
2104 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002105 .setkey = aead_setkey,
2106 .setauthsize = aead_setauthsize,
2107 .encrypt = aead_encrypt,
2108 .decrypt = aead_decrypt,
2109 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002110 .geniv = "<built-in>",
2111 .ivsize = DES3_EDE_BLOCK_SIZE,
2112 .maxauthsize = SHA256_DIGEST_SIZE,
2113 },
2114 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2115 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2116 OP_ALG_AAI_HMAC_PRECOMP,
2117 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2118 },
2119 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002120 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2121 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2122 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302123 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002124 .template_aead = {
2125 .setkey = aead_setkey,
2126 .setauthsize = aead_setauthsize,
2127 .encrypt = aead_encrypt,
2128 .decrypt = aead_decrypt,
2129 .givencrypt = aead_givencrypt,
2130 .geniv = "<built-in>",
2131 .ivsize = DES3_EDE_BLOCK_SIZE,
2132 .maxauthsize = SHA384_DIGEST_SIZE,
2133 },
2134 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2136 OP_ALG_AAI_HMAC_PRECOMP,
2137 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2138 },
2139 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002140 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2141 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2142 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002143 .type = CRYPTO_ALG_TYPE_AEAD,
2144 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002145 .setkey = aead_setkey,
2146 .setauthsize = aead_setauthsize,
2147 .encrypt = aead_encrypt,
2148 .decrypt = aead_decrypt,
2149 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002150 .geniv = "<built-in>",
2151 .ivsize = DES3_EDE_BLOCK_SIZE,
2152 .maxauthsize = SHA512_DIGEST_SIZE,
2153 },
2154 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2155 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2156 OP_ALG_AAI_HMAC_PRECOMP,
2157 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2158 },
2159 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002160 .name = "authenc(hmac(md5),cbc(des))",
2161 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2162 .blocksize = DES_BLOCK_SIZE,
2163 .type = CRYPTO_ALG_TYPE_AEAD,
2164 .template_aead = {
2165 .setkey = aead_setkey,
2166 .setauthsize = aead_setauthsize,
2167 .encrypt = aead_encrypt,
2168 .decrypt = aead_decrypt,
2169 .givencrypt = aead_givencrypt,
2170 .geniv = "<built-in>",
2171 .ivsize = DES_BLOCK_SIZE,
2172 .maxauthsize = MD5_DIGEST_SIZE,
2173 },
2174 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2175 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2176 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2177 },
2178 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002179 .name = "authenc(hmac(sha1),cbc(des))",
2180 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2181 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002182 .type = CRYPTO_ALG_TYPE_AEAD,
2183 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002184 .setkey = aead_setkey,
2185 .setauthsize = aead_setauthsize,
2186 .encrypt = aead_encrypt,
2187 .decrypt = aead_decrypt,
2188 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002189 .geniv = "<built-in>",
2190 .ivsize = DES_BLOCK_SIZE,
2191 .maxauthsize = SHA1_DIGEST_SIZE,
2192 },
2193 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2194 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2195 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2196 },
2197 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002198 .name = "authenc(hmac(sha224),cbc(des))",
2199 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2200 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302201 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002202 .template_aead = {
2203 .setkey = aead_setkey,
2204 .setauthsize = aead_setauthsize,
2205 .encrypt = aead_encrypt,
2206 .decrypt = aead_decrypt,
2207 .givencrypt = aead_givencrypt,
2208 .geniv = "<built-in>",
2209 .ivsize = DES_BLOCK_SIZE,
2210 .maxauthsize = SHA224_DIGEST_SIZE,
2211 },
2212 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2213 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2214 OP_ALG_AAI_HMAC_PRECOMP,
2215 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2216 },
2217 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002218 .name = "authenc(hmac(sha256),cbc(des))",
2219 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2220 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002221 .type = CRYPTO_ALG_TYPE_AEAD,
2222 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002223 .setkey = aead_setkey,
2224 .setauthsize = aead_setauthsize,
2225 .encrypt = aead_encrypt,
2226 .decrypt = aead_decrypt,
2227 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002228 .geniv = "<built-in>",
2229 .ivsize = DES_BLOCK_SIZE,
2230 .maxauthsize = SHA256_DIGEST_SIZE,
2231 },
2232 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2233 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2234 OP_ALG_AAI_HMAC_PRECOMP,
2235 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2236 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05002237 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002238 .name = "authenc(hmac(sha384),cbc(des))",
2239 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2240 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302241 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002242 .template_aead = {
2243 .setkey = aead_setkey,
2244 .setauthsize = aead_setauthsize,
2245 .encrypt = aead_encrypt,
2246 .decrypt = aead_decrypt,
2247 .givencrypt = aead_givencrypt,
2248 .geniv = "<built-in>",
2249 .ivsize = DES_BLOCK_SIZE,
2250 .maxauthsize = SHA384_DIGEST_SIZE,
2251 },
2252 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2253 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2254 OP_ALG_AAI_HMAC_PRECOMP,
2255 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2256 },
2257 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002258 .name = "authenc(hmac(sha512),cbc(des))",
2259 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2260 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002261 .type = CRYPTO_ALG_TYPE_AEAD,
2262 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002263 .setkey = aead_setkey,
2264 .setauthsize = aead_setauthsize,
2265 .encrypt = aead_encrypt,
2266 .decrypt = aead_decrypt,
2267 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002268 .geniv = "<built-in>",
2269 .ivsize = DES_BLOCK_SIZE,
2270 .maxauthsize = SHA512_DIGEST_SIZE,
2271 },
2272 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2273 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2274 OP_ALG_AAI_HMAC_PRECOMP,
2275 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2276 },
Yuan Kangacdca312011-07-15 11:21:42 +08002277 /* ablkcipher descriptor */
2278 {
2279 .name = "cbc(aes)",
2280 .driver_name = "cbc-aes-caam",
2281 .blocksize = AES_BLOCK_SIZE,
2282 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2283 .template_ablkcipher = {
2284 .setkey = ablkcipher_setkey,
2285 .encrypt = ablkcipher_encrypt,
2286 .decrypt = ablkcipher_decrypt,
2287 .geniv = "eseqiv",
2288 .min_keysize = AES_MIN_KEY_SIZE,
2289 .max_keysize = AES_MAX_KEY_SIZE,
2290 .ivsize = AES_BLOCK_SIZE,
2291 },
2292 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2293 },
2294 {
2295 .name = "cbc(des3_ede)",
2296 .driver_name = "cbc-3des-caam",
2297 .blocksize = DES3_EDE_BLOCK_SIZE,
2298 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2299 .template_ablkcipher = {
2300 .setkey = ablkcipher_setkey,
2301 .encrypt = ablkcipher_encrypt,
2302 .decrypt = ablkcipher_decrypt,
2303 .geniv = "eseqiv",
2304 .min_keysize = DES3_EDE_KEY_SIZE,
2305 .max_keysize = DES3_EDE_KEY_SIZE,
2306 .ivsize = DES3_EDE_BLOCK_SIZE,
2307 },
2308 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2309 },
2310 {
2311 .name = "cbc(des)",
2312 .driver_name = "cbc-des-caam",
2313 .blocksize = DES_BLOCK_SIZE,
2314 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2315 .template_ablkcipher = {
2316 .setkey = ablkcipher_setkey,
2317 .encrypt = ablkcipher_encrypt,
2318 .decrypt = ablkcipher_decrypt,
2319 .geniv = "eseqiv",
2320 .min_keysize = DES_KEY_SIZE,
2321 .max_keysize = DES_KEY_SIZE,
2322 .ivsize = DES_BLOCK_SIZE,
2323 },
2324 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2325 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002326};
2327
2328struct caam_crypto_alg {
2329 struct list_head entry;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002330 int class1_alg_type;
2331 int class2_alg_type;
2332 int alg_op;
2333 struct crypto_alg crypto_alg;
2334};
2335
2336static int caam_cra_init(struct crypto_tfm *tfm)
2337{
2338 struct crypto_alg *alg = tfm->__crt_alg;
2339 struct caam_crypto_alg *caam_alg =
2340 container_of(alg, struct caam_crypto_alg, crypto_alg);
2341 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002342
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302343 ctx->jrdev = caam_jr_alloc();
2344 if (IS_ERR(ctx->jrdev)) {
2345 pr_err("Job Ring Device allocation for transform failed\n");
2346 return PTR_ERR(ctx->jrdev);
2347 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002348
2349 /* copy descriptor header template value */
2350 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2351 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2352 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2353
2354 return 0;
2355}
2356
2357static void caam_cra_exit(struct crypto_tfm *tfm)
2358{
2359 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2360
Yuan Kang1acebad2011-07-15 11:21:42 +08002361 if (ctx->sh_desc_enc_dma &&
2362 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2363 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2364 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2365 if (ctx->sh_desc_dec_dma &&
2366 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2367 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2368 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2369 if (ctx->sh_desc_givenc_dma &&
2370 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2371 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2372 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05002373 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02002374 if (ctx->key_dma &&
2375 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
2376 dma_unmap_single(ctx->jrdev, ctx->key_dma,
2377 ctx->enckeylen + ctx->split_key_pad_len,
2378 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302379
2380 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002381}
2382
2383static void __exit caam_algapi_exit(void)
2384{
2385
Kim Phillips8e8ec592011-03-13 16:54:26 +08002386 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002387
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302388 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002389 return;
2390
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302391 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002392 crypto_unregister_alg(&t_alg->crypto_alg);
2393 list_del(&t_alg->entry);
2394 kfree(t_alg);
2395 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002396}
2397
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302398static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08002399 *template)
2400{
2401 struct caam_crypto_alg *t_alg;
2402 struct crypto_alg *alg;
2403
2404 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2405 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302406 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08002407 return ERR_PTR(-ENOMEM);
2408 }
2409
2410 alg = &t_alg->crypto_alg;
2411
2412 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2413 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2414 template->driver_name);
2415 alg->cra_module = THIS_MODULE;
2416 alg->cra_init = caam_cra_init;
2417 alg->cra_exit = caam_cra_exit;
2418 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002419 alg->cra_blocksize = template->blocksize;
2420 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002421 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01002422 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2423 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08002424 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08002425 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2426 alg->cra_type = &crypto_ablkcipher_type;
2427 alg->cra_ablkcipher = template->template_ablkcipher;
2428 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08002429 case CRYPTO_ALG_TYPE_AEAD:
2430 alg->cra_type = &crypto_aead_type;
2431 alg->cra_aead = template->template_aead;
2432 break;
2433 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002434
2435 t_alg->class1_alg_type = template->class1_alg_type;
2436 t_alg->class2_alg_type = template->class2_alg_type;
2437 t_alg->alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002438
2439 return t_alg;
2440}
2441
2442static int __init caam_algapi_init(void)
2443{
Kim Phillips8e8ec592011-03-13 16:54:26 +08002444 int i = 0, err = 0;
2445
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302446 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002447
2448 /* register crypto algorithms the device supports */
2449 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2450 /* TODO: check if h/w supports alg */
2451 struct caam_crypto_alg *t_alg;
2452
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302453 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002454 if (IS_ERR(t_alg)) {
2455 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302456 pr_warn("%s alg allocation failed\n",
2457 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002458 continue;
2459 }
2460
2461 err = crypto_register_alg(&t_alg->crypto_alg);
2462 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302463 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002464 t_alg->crypto_alg.cra_driver_name);
2465 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02002466 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302467 list_add_tail(&t_alg->entry, &alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002468 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302469 if (!list_empty(&alg_list))
2470 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08002471
2472 return err;
2473}
2474
2475module_init(caam_algapi_init);
2476module_exit(caam_algapi_exit);
2477
2478MODULE_LICENSE("GPL");
2479MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2480MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");