/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
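
/*
 * A minimal sketch of the job-descriptor half of the picture above,
 * using the desc_constr.h inlines (illustrative only - the real job
 * descriptors are built further down in this file, and sh_desc_dma,
 * dst_dma and src_dma are assumed to be addresses that have already
 * been DMA-mapped):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 */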

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
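
/*
 * Worked example, assuming CAAM_CMD_SZ == 4 (one 32-bit command word):
 * DESC_AEAD_GIVENC_LEN = (4 + 15 + 7) * 4 = 104 bytes and
 * CAAM_MAX_KEY_SIZE = 32 + 64 * 2 = 160 bytes, so DESC_MAX_USED_LEN is
 * (104 + 160) / 4 = 66 words. This sizes the per-context shadow copies
 * below, not the 64-word descriptor buffer in hardware.
 */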

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
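
/*
 * Why two OPERATION variants above (an inference from the descriptor
 * logic, not a quote from the hardware reference): when the shared
 * descriptor runs with the SHRD condition set, the class 1 key register
 * still holds the AES key schedule left behind by the sharing job, which
 * is already in decrypt form, so only that path sets the Decrypt Key
 * (DK) AAI bit.
 */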

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
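
/*
 * Layout of key[] (assembled by the setkey handlers below): for
 * authenc-style algorithms it holds the MDHA split key, padded to
 * split_key_pad_len, immediately followed by the class 1 encryption key
 * (see append_key_aead()); for gcm it holds the raw AES key; for rfc4106
 * it holds the AES key followed by the 4-byte nonce salt.
 */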

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO, jump to the zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);
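	/*
	 * Encoding note (a reading of the JUMP command format, not spelled
	 * out in this file): when no jump target is patched in later, the
	 * low bits of the JUMP word hold a relative offset in 32-bit
	 * command words, so "| 7" lands on the ICV write below, skipping
	 * the zero-payload commands.
	 */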

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to IV reading (the IV is the only input data) */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV (the only input data) */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
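	/*
	 * Worked example (illustrative): with a SHA-256-based authenc
	 * algorithm the MDHA pad size is 32 bytes, so split_key_len is
	 * 64 and split_key_pad_len = ALIGN(64, 16) = 64; with SHA-1 the
	 * pad size is 20, giving split_key_len 40, padded up to 48.
	 */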

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
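
/*
 * hw_desc[0] is a zero-length (flexible) array: the edesc allocation code
 * later in this driver over-allocates each edesc so that the job
 * descriptor, and behind it the sec4_sg link table, live in the same
 * allocation immediately after the struct.
 */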

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify that the hw ICV (authentication) check passed;
	 * otherwise return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
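
/*
 * The AES-GCM test above is repeated verbatim in init_aead_giv_job() and
 * in both aead edesc allocators below. A minimal sketch of a helper that
 * could factor it out (illustrative only; this driver does not define it):
 */
static inline bool caam_is_aes_gcm(u32 class1_alg_type)
{
	return ((class1_alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) &&
	       ((class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM);
}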

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  (edesc->assoc_nents +
				   (is_gcm ? 1 + edesc->src_nents : 0));
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}
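
/*
 * Note that for givencrypt the output sequence programmed above starts at
 * the generated IV: the job writes ivsize + req->cryptlen + authsize bytes,
 * i.e. IV, then ciphertext, then the ICV.
 */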

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
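
/*
 * The ablkcipher job descriptor built above is compact: a header pointing
 * at the shared descriptor, a SEQ IN PTR covering IV plus input data
 * (req->nbytes + ivsize, with LDST_SGF set when a sec4 S/G table is used
 * instead of a flat buffer), and a SEQ OUT PTR covering req->nbytes of
 * output.
 */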

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expects the input sequence IV, AAD, text;
	 * all other modes expect AAD, IV, text.
	 */
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
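
	/*
	 * In the contiguous case the single flat DMA region looks like
	 * (addresses increasing):
	 *
	 *	GCM:        [ IV ][ AAD ][ text ]
	 *	all others: [ AAD ][ IV ][ text ]
	 *
	 * Any gap between the pieces, or a multi-segment scatterlist,
	 * forces the S/G-table path below.
	 */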
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
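
/*
 * When the AEAD data are not contiguous, aead_edesc_alloc() lays the sec4
 * S/G table out in job order ("last" marks entries flagged by
 * sg_to_sec4_sg_last()):
 *
 *	non-GCM: [assoc ...][IV][src ... last][dst ... last, if dst != src]
 *	GCM:     [IV][assoc ...][src ... last][dst ... last, if dst != src]
 */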

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expects the input sequence IV, AAD, text;
	 * all other modes expect AAD, IV, text.
	 */
	if (is_gcm) {
		if (assoc_nents || iv_dma + ivsize !=
		    sg_dma_address(req->assoc) || src_nents ||
		    sg_dma_address(req->assoc) + req->assoclen !=
		    sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	} else {
		if (assoc_nents ||
		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	}

	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (req->src == req->dst &&
		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
			contig &= ~GIV_DST_CONTIG;
	}

	/*
	 * Add new sg entries for GCM output sequence.
	 * Expected output sequence: IV, encrypted text.
	 */
	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
		sec4_sg_len += 1 + src_nents;

	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1 + dst_nents;
	}

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}

	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
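
/*
 * In the givencrypt variant above, an in-place GCM request that loses
 * GIV_DST_CONTIG gets an extra IV entry plus a second run of src entries
 * appended to the table, so the output sequence can also begin at the IV
 * and produce the expected IV-then-encrypted-text layout.
 */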

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
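
/*
 * The resulting ablkcipher sec4 S/G table, when the IV cannot be folded
 * into a contiguous input, is simply:
 *
 *	[IV][src ... last][dst ... last, if dst != src]
 */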

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
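
/*
 * A minimal sketch of how a kernel-side caller could exercise one of the
 * ablkcipher algorithms this driver registers. The "cbc(aes)" name, the
 * helper names and the synchronous wait are illustrative assumptions, not
 * part of this file; the block is kept under #if 0 so it is never built.
 */
#if 0
static void example_crypt_done(struct crypto_async_request *areq, int err)
{
	complete(areq->data);	/* wake the waiter below */
}

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   u8 key[16], u8 iv[16])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, 16);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* buf must be linear, DMA-able memory (e.g. kmalloc'd) */
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_crypt_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* job was queued (or backlogged); wait for completion */
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif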

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2783 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002784 .type = CRYPTO_ALG_TYPE_AEAD,
2785 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002786 .setkey = aead_setkey,
2787 .setauthsize = aead_setauthsize,
2788 .encrypt = aead_encrypt,
2789 .decrypt = aead_decrypt,
2790 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002791 .geniv = "<built-in>",
2792 .ivsize = DES3_EDE_BLOCK_SIZE,
2793 .maxauthsize = SHA1_DIGEST_SIZE,
2794 },
2795 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2796 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2797 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2798 },
2799 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002800 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2801 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2802 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302803 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002804 .template_aead = {
2805 .setkey = aead_setkey,
2806 .setauthsize = aead_setauthsize,
2807 .encrypt = aead_encrypt,
2808 .decrypt = aead_decrypt,
2809 .givencrypt = aead_givencrypt,
2810 .geniv = "<built-in>",
2811 .ivsize = DES3_EDE_BLOCK_SIZE,
2812 .maxauthsize = SHA224_DIGEST_SIZE,
2813 },
2814 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2815 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2816 OP_ALG_AAI_HMAC_PRECOMP,
2817 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2818 },
2819 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002820 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2821 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2822 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002823 .type = CRYPTO_ALG_TYPE_AEAD,
2824 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002825 .setkey = aead_setkey,
2826 .setauthsize = aead_setauthsize,
2827 .encrypt = aead_encrypt,
2828 .decrypt = aead_decrypt,
2829 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002830 .geniv = "<built-in>",
2831 .ivsize = DES3_EDE_BLOCK_SIZE,
2832 .maxauthsize = SHA256_DIGEST_SIZE,
2833 },
2834 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2835 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2836 OP_ALG_AAI_HMAC_PRECOMP,
2837 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2838 },
2839 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002840 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2841 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2842 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302843 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002844 .template_aead = {
2845 .setkey = aead_setkey,
2846 .setauthsize = aead_setauthsize,
2847 .encrypt = aead_encrypt,
2848 .decrypt = aead_decrypt,
2849 .givencrypt = aead_givencrypt,
2850 .geniv = "<built-in>",
2851 .ivsize = DES3_EDE_BLOCK_SIZE,
2852 .maxauthsize = SHA384_DIGEST_SIZE,
2853 },
2854 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2855 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2856 OP_ALG_AAI_HMAC_PRECOMP,
2857 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2858 },
2859 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002860 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2861 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2862 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002863 .type = CRYPTO_ALG_TYPE_AEAD,
2864 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002865 .setkey = aead_setkey,
2866 .setauthsize = aead_setauthsize,
2867 .encrypt = aead_encrypt,
2868 .decrypt = aead_decrypt,
2869 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002870 .geniv = "<built-in>",
2871 .ivsize = DES3_EDE_BLOCK_SIZE,
2872 .maxauthsize = SHA512_DIGEST_SIZE,
2873 },
2874 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2875 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2876 OP_ALG_AAI_HMAC_PRECOMP,
2877 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2878 },
2879 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002880 .name = "authenc(hmac(md5),cbc(des))",
2881 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2882 .blocksize = DES_BLOCK_SIZE,
2883 .type = CRYPTO_ALG_TYPE_AEAD,
2884 .template_aead = {
2885 .setkey = aead_setkey,
2886 .setauthsize = aead_setauthsize,
2887 .encrypt = aead_encrypt,
2888 .decrypt = aead_decrypt,
2889 .givencrypt = aead_givencrypt,
2890 .geniv = "<built-in>",
2891 .ivsize = DES_BLOCK_SIZE,
2892 .maxauthsize = MD5_DIGEST_SIZE,
2893 },
2894 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2895 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2896 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2897 },
2898 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002899 .name = "authenc(hmac(sha1),cbc(des))",
2900 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2901 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002902 .type = CRYPTO_ALG_TYPE_AEAD,
2903 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002904 .setkey = aead_setkey,
2905 .setauthsize = aead_setauthsize,
2906 .encrypt = aead_encrypt,
2907 .decrypt = aead_decrypt,
2908 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002909 .geniv = "<built-in>",
2910 .ivsize = DES_BLOCK_SIZE,
2911 .maxauthsize = SHA1_DIGEST_SIZE,
2912 },
2913 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2914 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2915 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2916 },
2917 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002918 .name = "authenc(hmac(sha224),cbc(des))",
2919 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2920 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302921 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002922 .template_aead = {
2923 .setkey = aead_setkey,
2924 .setauthsize = aead_setauthsize,
2925 .encrypt = aead_encrypt,
2926 .decrypt = aead_decrypt,
2927 .givencrypt = aead_givencrypt,
2928 .geniv = "<built-in>",
2929 .ivsize = DES_BLOCK_SIZE,
2930 .maxauthsize = SHA224_DIGEST_SIZE,
2931 },
2932 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2933 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2934 OP_ALG_AAI_HMAC_PRECOMP,
2935 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2936 },
2937 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002938 .name = "authenc(hmac(sha256),cbc(des))",
2939 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2940 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002941 .type = CRYPTO_ALG_TYPE_AEAD,
2942 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002943 .setkey = aead_setkey,
2944 .setauthsize = aead_setauthsize,
2945 .encrypt = aead_encrypt,
2946 .decrypt = aead_decrypt,
2947 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002948 .geniv = "<built-in>",
2949 .ivsize = DES_BLOCK_SIZE,
2950 .maxauthsize = SHA256_DIGEST_SIZE,
2951 },
2952 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2953 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2954 OP_ALG_AAI_HMAC_PRECOMP,
2955 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2956 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05002957 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002958 .name = "authenc(hmac(sha384),cbc(des))",
2959 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2960 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302961 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002962 .template_aead = {
2963 .setkey = aead_setkey,
2964 .setauthsize = aead_setauthsize,
2965 .encrypt = aead_encrypt,
2966 .decrypt = aead_decrypt,
2967 .givencrypt = aead_givencrypt,
2968 .geniv = "<built-in>",
2969 .ivsize = DES_BLOCK_SIZE,
2970 .maxauthsize = SHA384_DIGEST_SIZE,
2971 },
2972 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2973 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2974 OP_ALG_AAI_HMAC_PRECOMP,
2975 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2976 },
2977 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002978 .name = "authenc(hmac(sha512),cbc(des))",
2979 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2980 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002981 .type = CRYPTO_ALG_TYPE_AEAD,
2982 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002983 .setkey = aead_setkey,
2984 .setauthsize = aead_setauthsize,
2985 .encrypt = aead_encrypt,
2986 .decrypt = aead_decrypt,
2987 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002988 .geniv = "<built-in>",
2989 .ivsize = DES_BLOCK_SIZE,
2990 .maxauthsize = SHA512_DIGEST_SIZE,
2991 },
2992 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2993 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2994 OP_ALG_AAI_HMAC_PRECOMP,
2995 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2996 },
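	/*
	 * Galois Counter Mode as used by IPsec ESP (RFC4106): the last
	 * 4 bytes of the key hold the nonce salt, and the 8-byte
	 * per-request IV completes the 12-byte GCM nonce.
	 */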
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
	},
	/* Galois Counter Mode */
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = NULL,
			.geniv = "<built-in>",
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
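
/*
 * Illustrative only (not part of the driver): kernel users reach the
 * templates above through the generic crypto API, e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_aead(tfm);
 *
 * and likewise crypto_alloc_ablkcipher("cbc(aes)", 0, 0) for the
 * ablkcipher templates. Requesting a driver_name form such as
 * "authenc-hmac-sha1-cbc-aes-caam" pins this CAAM implementation
 * instead of whichever registered implementation has highest priority.
 */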

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

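/*
 * Per-transform init: bind the transform to a job ring and seed its
 * context with the descriptor header template values for the selected
 * algorithm.
 */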
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

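/*
 * Per-transform teardown: undo the shared-descriptor and key DMA
 * mappings created while the transform was in use, then release the
 * job ring.
 */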
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

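/* Unregister and free every algorithm that was added to alg_list */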
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

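/*
 * Build a caam_crypto_alg from one driver_algs[] template: copy the
 * names, wire up the common init/exit hooks, and stash the OPERATION
 * command selectors for caam_cra_init() to copy per transform.
 */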
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

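/*
 * Module init: look up the CAAM controller node to confirm the
 * controller driver has probed successfully, then register every
 * template in driver_algs[] with the crypto API.
 */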
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &alg_list);
		}
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");