/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
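
/*
 * Illustrative sketch only -- not driver code, and src_dma, dst_dma, src_len,
 * dst_len and done_cb below are hypothetical placeholders. Using the helpers
 * from desc_constr.h and jr.h, one packet's job descriptor against a prebuilt
 * shared descriptor would be built and enqueued roughly like so:
 *
 *	u32 jd[MAX_CAAM_DESCSIZE];
 *
 *	init_job_desc_shared(jd, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER);
 *	append_seq_out_ptr(jd, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(jd, src_dma, src_len, 0);
 *	caam_jr_enqueue(jrdev, jd, done_cb, req);
 *
 * The real job descriptors in this driver are built per-request further down
 * in this file and complete asynchronously through the job ring.
 */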

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
#define DESC_RFC4543_GIVENC_LEN		(DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_RFC4543_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
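/*
 * Note: CAAM_CMD_SZ is the size of one 32-bit descriptor command word, so the
 * DESC_*_LEN values above are byte counts; DESC_MAX_USED_LEN converts the
 * worst case back into words for sizing the per-session buffers below.
 */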

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
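/* list of algorithms registered with the crypto API, filled at module init */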
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

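	/*
	 * Two alternative OPERATION commands follow: when the descriptor is
	 * already shared, the conditional jump below selects the
	 * OP_ALG_AAI_DK variant; otherwise the plain decrypt operation runs
	 * and the unconditional jump skips the DK one.
	 */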
	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
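 *
 * Holds the three prebuilt shared descriptors (encrypt, decrypt, givencrypt)
 * with their DMA addresses, plus the key material: an MDHA split key padded
 * out to split_key_pad_len, followed immediately by the class 1 encryption key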
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

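/*
 * Common shared-descriptor prologue: a SERIAL-sharing header followed by the
 * key commands guarded by a JUMP_COND_SHRD jump, so that key loading is
 * skipped when the descriptor is already shared and the keys are in place.
 */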
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

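	/*
	 * GCM accepts zero-length payloads and zero-length assoc data, so the
	 * descriptors below branch around the corresponding FIFO loads
	 * whenever cryptlen and/or assoclen are zero.
	 */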
	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to IV reading - IV is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV - IV is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = (seqinlen - ivsize) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Read Salt and AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Authenticate AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));
1323
1324 /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
1325 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
1326
1327 /* Overwrite blank immediate AES-GMAC ESP IV data */
1328 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1329 (tfm->ivsize << MOVE_LEN_SHIFT));
1330
1331 /* Overwrite blank immediate AAD data */
1332 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1333 (tfm->ivsize << MOVE_LEN_SHIFT));
1334
1335 /* assoclen = (assoclen + cryptlen) - cryptlen */
1336 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1337 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1338
1339 /*
1340 * MOVE_LEN opcode is not available in all SEC HW revisions,
1341 * thus need to do some magic, i.e. self-patch the descriptor
1342 * buffer.
1343 */
1344 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1345 (0x6 << MOVE_LEN_SHIFT));
1346 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1347 (0x8 << MOVE_LEN_SHIFT));
1348
1349 /* Read Salt and AES-GMAC ESP IV */
1350 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1351 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1352 /* Append Salt */
1353 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1354 set_move_tgt_here(desc, write_iv_cmd);
1355 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1356 append_cmd(desc, 0x00000000);
1357 append_cmd(desc, 0x00000000);
1358 /* End of blank commands */
1359
1360 /* Read assoc data */
1361 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1362 FIFOLD_TYPE_AAD);
1363
1364 /* Will read cryptlen bytes */
1365 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1366
1367 /* Will write cryptlen bytes */
1368 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
1369
1370 /* Authenticate AES-GMAC ESP IV */
1371 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1372 FIFOLD_TYPE_AAD | tfm->ivsize);
1373 set_move_tgt_here(desc, write_aad_cmd);
1374 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1375 append_cmd(desc, 0x00000000);
1376 append_cmd(desc, 0x00000000);
1377 /* End of blank commands */
1378
1379 /* Store payload data */
1380 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1381
1382 /* In-snoop cryptlen data */
1383 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1384 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1385
1386 set_move_tgt_here(desc, read_move_cmd);
1387 set_move_tgt_here(desc, write_move_cmd);
1388 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1389 /* Move payload data to OFIFO */
1390 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1391 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1392
1393 /* Read ICV */
1394 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1395 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1396
1397 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1398 desc_bytes(desc),
1399 DMA_TO_DEVICE);
1400 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1401 dev_err(jrdev, "unable to map shared descriptor\n");
1402 return -ENOMEM;
1403 }
1404#ifdef DEBUG
1405 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1406 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1407 desc_bytes(desc), 1);
1408#endif
1409
1410 /*
1411 * Job Descriptor and Shared Descriptors
1412 * must all fit into the 64-word Descriptor h/w Buffer
1413 */
1414 keys_fit_inline = false;
1415 if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
1416 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1417 keys_fit_inline = true;
1418
1419 /* rfc4543_givencrypt shared descriptor */
1420 desc = ctx->sh_desc_givenc;
1421
1422 init_sh_desc(desc, HDR_SHARE_SERIAL);
1423
1424 /* Skip key loading if it is loaded due to sharing */
1425 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1426 JUMP_COND_SHRD);
1427 if (keys_fit_inline)
1428 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1429 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1430 else
1431 append_key(desc, ctx->key_dma, ctx->enckeylen,
1432 CLASS_1 | KEY_DEST_CLASS_REG);
1433 set_jump_tgt_here(desc, key_jump_cmd);
1434
1435 /* Generate IV */
1436 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1437 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1438 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1439 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
		   LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move generated IV to Math1 register */
	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Overwrite blank immediate AES-GMAC IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Copy generated IV to OFIFO */
	append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/*
	 * The MOVE_LEN opcode is not available in all SEC HW revisions,
	 * so the descriptor buffer is self-patched at run time instead.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

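/*
 * A note on the self-patching above, since it is easy to misread:
 * set_move_tgt_here() retargets a MOVE command that was appended earlier,
 * so that at run time the generated IV held in MATH1 is copied over the
 * 0x00000000 placeholder words.  A minimal sketch of the pattern
 * (illustrative only, not an additional descriptor):
 *
 *	u32 *patch = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
 *				 (ivsize << MOVE_LEN_SHIFT));
 *	...
 *	set_move_tgt_here(desc, patch);    point the MOVE at this offset
 *	append_cmd(desc, 0x00000000);      placeholders overwritten with
 *	append_cmd(desc, 0x00000000);      the real IV at run time
 */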
1542static int rfc4543_setauthsize(struct crypto_aead *authenc,
1543 unsigned int authsize)
1544{
1545 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1546
1547 ctx->authsize = authsize;
1548 rfc4543_set_sh_desc(authenc);
1549
1550 return 0;
1551}
1552
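/*
 * Changing the authentication tag length invalidates the shared
 * descriptors (the ICV size is compiled into them), which is why the
 * setauthsize hooks rebuild them.  Caller-side sketch (the transform
 * name and error handling are only an example):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("rfc4543(gcm(aes))",
 *						    0, 0);
 *	if (!IS_ERR(tfm))
 *		err = crypto_aead_setauthsize(tfm, 16);
 */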
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

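/*
 * A "split key" is the MDHA-precomputed HMAC inner/outer pad state
 * (ipad/opad), so the raw authentication key never has to appear in a
 * descriptor.  Its length is twice the digest size, padded out to the
 * 16-byte granularity the hardware expects.  Worked example for
 * hmac(sha256): split_key_len = 32 * 2 = 64 and
 * split_key_pad_len = ALIGN(64, 16) = 64.
 */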
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

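/*
 * After a successful aead_setkey() the DMA-mapped key material in
 * ctx->key is laid out as:
 *
 *	+--------------------------------+---------------------+
 *	| HMAC split key (ipad/opad),    | encryption key      |
 *	| padded to split_key_pad_len    | (keys.enckeylen)    |
 *	+--------------------------------+---------------------+
 *
 * e.g. for authenc(hmac(sha256),cbc(aes)) with AES-128 this is 64 bytes
 * of split key followed by 16 bytes of AES key (a worked example, not a
 * constraint imposed by the code above).
 */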
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

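/*
 * For rfc4106(gcm(aes)) (and rfc4543 below) the setkey blob is the AES
 * key with the 4-byte nonce salt appended, per RFC 4106: a 20-byte blob
 * therefore yields AES-128 with key[16..19] as the salt.  The salt plus
 * the 8-byte per-packet explicit IV form the 12-byte GCM nonce, which is
 * why only enckeylen bytes are DMA-mapped here; the salt is emitted into
 * the shared descriptor as immediate data instead.
 */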
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

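/*
 * Shape of the ablkcipher shared descriptors built above (a sketch of
 * the command sequence, not a byte-accurate dump):
 *
 *	SHR HDR   (serial sharing)
 *	JUMP      (skip the key load if the descriptor is already shared)
 *	KEY       (class 1, key as immediate data)
 *	SEQ LOAD  ivsize bytes into class 1 context (the IV)
 *	OPERATION (cipher, encrypt or decrypt)
 *	SEQ FIFO LOAD/STORE of the payload (ablkcipher_append_src_dst())
 */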
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

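/*
 * An extended descriptor and everything it references are carved out of
 * a single kmalloc() in the *_edesc_alloc() routines below.  Sketch of
 * the resulting layout for aead_edesc:
 *
 *	+-------------------+--------------------+---------------------+
 *	| struct aead_edesc | hw job descriptor  | sec4 s/g link table |
 *	|                   | (desc_bytes)       | (sec4_sg_bytes)     |
 *	+-------------------+--------------------+---------------------+
 *
 * edesc->sec4_sg points just past the job descriptor area.
 */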
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

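/*
 * The completion callbacks recover the extended descriptor from the bare
 * h/w descriptor pointer handed back by the job ring, relying on hw_desc
 * being the last member of the edesc:
 *
 *	edesc = (struct aead_edesc *)((char *)desc -
 *				      offsetof(struct aead_edesc, hw_desc));
 *
 * i.e. the same trick as container_of(), written out by hand.
 */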
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * Verify that the h/w ICV check passed; if not, report -EBADMSG.
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

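/*
 * -EBADMSG is the crypto API's convention for an authentication (ICV)
 * failure, letting callers such as the IPsec stack distinguish a
 * tampered or corrupted payload from a transport or driver error.
 */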
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	     OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

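/*
 * Shape of the job descriptor built by init_aead_job() (sketch):
 *
 *	HDR (shared descriptor pointer, REVERSE | DEFER)
 *	SEQ IN PTR : assoc, IV and payload (IV first for GCM),
 *	             assoclen + ivsize + cryptlen bytes
 *	SEQ OUT PTR: payload plus or minus the ICV (cryptlen + authsize on
 *	             encrypt, cryptlen - authsize on decrypt)
 *
 * When the buffers are not contiguous the pointers reference the sec4
 * s/g table instead, with LDST_SGF set in the options word.
 */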
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	     OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  (edesc->assoc_nents +
				   (is_gcm ? 1 + edesc->src_nents : 0));
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

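/*
 * For givencrypt the output sequence is IV, ciphertext, ICV, i.e.
 * seqoutlen = ivsize + cryptlen + authsize: the hardware writes the
 * freshly generated IV in front of the payload.  When the destination is
 * contiguous, dst_dma is simply edesc->iv_dma, so the IV lands directly
 * in the caller's giv buffer with the ciphertext following it.
 */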
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

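/*
 * Shape of the job descriptor built by init_ablkcipher_job() (sketch):
 *
 *	HDR (shared descriptor pointer, REVERSE | DEFER)
 *	SEQ IN PTR : IV + payload, nbytes + ivsize bytes
 *	SEQ OUT PTR: payload, nbytes bytes
 */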
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	     OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

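/*
 * Resulting sec4 s/g table layout for a non-contiguous request (sketch;
 * the non-GCM case is shown, GCM moves the IV entry in front of the
 * assoc entries):
 *
 *	[ assoc segment(s) ][ IV ][ src segment(s), last one flagged ]
 *	[ dst segment(s), last one flagged ]       (only if dst != src)
 */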
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

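/*
 * These entry points are reached through the generic crypto API.  A
 * minimal caller-side sketch for the old-style AEAD interface this code
 * implements (names such as "tfm", the callback and the buffers are
 * hypothetical):
 *
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  my_complete, my_ctx);
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	err = crypto_aead_encrypt(req);    returns -EINPROGRESS when the
 *					   job has been queued to the ring
 */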
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	     OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */

	if (is_gcm) {
		if (assoc_nents || iv_dma + ivsize !=
		    sg_dma_address(req->assoc) || src_nents ||
		    sg_dma_address(req->assoc) + req->assoclen !=
		    sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	} else {
		if (assoc_nents ||
		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	}

	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (req->src == req->dst &&
		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
			contig &= ~GIV_DST_CONTIG;
	}

	/*
	 * Add new sg entries for GCM output sequence.
	 * Expected output sequence: IV, encrypted text.
	 */
	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
		sec4_sg_len += 1 + src_nents;

	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1 + dst_nents;
	}

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}

	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

2715static int aead_givencrypt(struct aead_givcrypt_request *areq)
2716{
2717 struct aead_request *req = &areq->areq;
2718 struct aead_edesc *edesc;
2719 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002720 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2721 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002722 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002723 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002724 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002725
Kim Phillips8e8ec592011-03-13 16:54:26 +08002726 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002727 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
2728 CAAM_CMD_SZ, &contig);
2729
Kim Phillips8e8ec592011-03-13 16:54:26 +08002730 if (IS_ERR(edesc))
2731 return PTR_ERR(edesc);
2732
Yuan Kang1acebad2011-07-15 11:21:42 +08002733#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002734 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002735 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2736 req->cryptlen, 1);
2737#endif
2738
 2739	/* Create and submit job descriptor */
2740 init_aead_giv_job(ctx->sh_desc_givenc,
2741 ctx->sh_desc_givenc_dma, edesc, req, contig);
2742#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002743 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002744 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2745 desc_bytes(edesc->hw_desc), 1);
2746#endif
2747
Kim Phillips8e8ec592011-03-13 16:54:26 +08002748 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002749 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2750 if (!ret) {
2751 ret = -EINPROGRESS;
2752 } else {
2753 aead_unmap(jrdev, edesc, req);
2754 kfree(edesc);
2755 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002756
Yuan Kang1acebad2011-07-15 11:21:42 +08002757 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002758}
2759
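/*
 * ecb(cipher_null) AEADs advertise NULL_IV_SIZE (zero-byte) IVs, so
 * there is no IV to generate and givencrypt reduces to plain encrypt.
 */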
Horia Geantaae4a8252014-03-14 17:46:52 +02002760static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
2761{
2762 return aead_encrypt(&areq->areq);
2763}
2764
Yuan Kangacdca312011-07-15 11:21:42 +08002765/*
 2766 * allocate and map the extended descriptor for ablkcipher requests
2767 */
2768static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2769 *req, int desc_bytes,
2770 bool *iv_contig_out)
2771{
2772 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2773 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2774 struct device *jrdev = ctx->jrdev;
2775 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2776 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2777 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05002778 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002779 struct ablkcipher_edesc *edesc;
2780 dma_addr_t iv_dma = 0;
2781 bool iv_contig = false;
2782 int sgc;
2783 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05002784 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002785 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08002786
Yuan Kang643b39b2012-06-22 19:48:49 -05002787 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002788
Yuan Kang643b39b2012-06-22 19:48:49 -05002789 if (req->dst != req->src)
2790 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002791
2792 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002793 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2794 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002795 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002796 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2797 DMA_TO_DEVICE, src_chained);
2798 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2799 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002800 }
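	/*
	 * In-place requests are mapped once, bidirectionally; otherwise
	 * source and destination get separate unidirectional mappings.
	 */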
2801
Horia Geantace572082014-07-11 15:34:49 +03002802 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2803 if (dma_mapping_error(jrdev, iv_dma)) {
2804 dev_err(jrdev, "unable to map IV\n");
2805 return ERR_PTR(-ENOMEM);
2806 }
2807
Yuan Kangacdca312011-07-15 11:21:42 +08002808 /*
 2809	 * Check if the IV can be contiguous with the source (a single
 2810	 * segment starting right where the IV ends); if not, build an S/G table.
2811 */
Yuan Kangacdca312011-07-15 11:21:42 +08002812 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2813 iv_contig = true;
2814 else
2815 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002816 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2817 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002818
2819 /* allocate space for base edesc and hw desc commands, link tables */
2820 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002821 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002822 if (!edesc) {
2823 dev_err(jrdev, "could not allocate extended descriptor\n");
2824 return ERR_PTR(-ENOMEM);
2825 }
2826
2827 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002828 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08002829 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002830 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05002831 edesc->sec4_sg_bytes = sec4_sg_bytes;
2832 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2833 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002834
Yuan Kanga299c832012-06-22 19:48:46 -05002835 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002836 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002837 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2838 sg_to_sec4_sg_last(req->src, src_nents,
2839 edesc->sec4_sg + 1, 0);
2840 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002841 }
2842
Yuan Kang643b39b2012-06-22 19:48:49 -05002843 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002844 sg_to_sec4_sg_last(req->dst, dst_nents,
2845 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08002846 }
2847
Yuan Kanga299c832012-06-22 19:48:46 -05002848 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2849 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002850 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2851 dev_err(jrdev, "unable to map S/G table\n");
2852 return ERR_PTR(-ENOMEM);
2853 }
2854
Yuan Kangacdca312011-07-15 11:21:42 +08002855 edesc->iv_dma = iv_dma;
2856
2857#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002858 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05002859 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2860 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002861#endif
2862
2863 *iv_contig_out = iv_contig;
2864 return edesc;
2865}
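/*
 * Layout of the single allocation returned above:
 *
 *   [ struct ablkcipher_edesc | hw job descriptor | sec4_sg table ]
 *                              <--- desc_bytes --> <-sec4_sg_bytes->
 */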
2866
2867static int ablkcipher_encrypt(struct ablkcipher_request *req)
2868{
2869 struct ablkcipher_edesc *edesc;
2870 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2871 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2872 struct device *jrdev = ctx->jrdev;
2873 bool iv_contig;
2874 u32 *desc;
2875 int ret = 0;
2876
2877 /* allocate extended descriptor */
2878 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2879 CAAM_CMD_SZ, &iv_contig);
2880 if (IS_ERR(edesc))
2881 return PTR_ERR(edesc);
2882
 2883	/* Create and submit job descriptor */
2884 init_ablkcipher_job(ctx->sh_desc_enc,
2885 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2886#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002887 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002888 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2889 desc_bytes(edesc->hw_desc), 1);
2890#endif
2891 desc = edesc->hw_desc;
2892 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2893
2894 if (!ret) {
2895 ret = -EINPROGRESS;
2896 } else {
2897 ablkcipher_unmap(jrdev, edesc, req);
2898 kfree(edesc);
2899 }
2900
2901 return ret;
2902}
2903
2904static int ablkcipher_decrypt(struct ablkcipher_request *req)
2905{
2906 struct ablkcipher_edesc *edesc;
2907 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2908 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2909 struct device *jrdev = ctx->jrdev;
2910 bool iv_contig;
2911 u32 *desc;
2912 int ret = 0;
2913
2914 /* allocate extended descriptor */
2915 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2916 CAAM_CMD_SZ, &iv_contig);
2917 if (IS_ERR(edesc))
2918 return PTR_ERR(edesc);
2919
 2920	/* Create and submit job descriptor */
2921 init_ablkcipher_job(ctx->sh_desc_dec,
2922 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2923 desc = edesc->hw_desc;
2924#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002925 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002926 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2927 desc_bytes(edesc->hw_desc), 1);
2928#endif
2929
2930 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2931 if (!ret) {
2932 ret = -EINPROGRESS;
2933 } else {
2934 ablkcipher_unmap(jrdev, edesc, req);
2935 kfree(edesc);
2936 }
2937
2938 return ret;
2939}
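/*
 * In both directions a zero return from caam_jr_enqueue() hands edesc
 * off to the completion callback (which unmaps and frees it) and the
 * request proceeds asynchronously as -EINPROGRESS; any other return is
 * a synchronous failure cleaned up inline above.
 */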
2940
Yuan Kang885e9e22011-07-15 11:21:41 +08002941#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002942#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08002943struct caam_alg_template {
2944 char name[CRYPTO_MAX_ALG_NAME];
2945 char driver_name[CRYPTO_MAX_ALG_NAME];
2946 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002947 u32 type;
2948 union {
2949 struct ablkcipher_alg ablkcipher;
2950 struct aead_alg aead;
2951 struct blkcipher_alg blkcipher;
2952 struct cipher_alg cipher;
2953 struct compress_alg compress;
2954 struct rng_alg rng;
2955 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002956 u32 class1_alg_type;
2957 u32 class2_alg_type;
2958 u32 alg_op;
2959};
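/*
 * Only the union arm named by ->type is meaningful for a given entry;
 * caam_alg_alloc() switches on ->type to copy that arm into the
 * crypto_alg it registers.
 */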
2960
2961static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02002962 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08002963 {
Horia Geantaae4a8252014-03-14 17:46:52 +02002964 .name = "authenc(hmac(md5),ecb(cipher_null))",
2965 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
2966 .blocksize = NULL_BLOCK_SIZE,
2967 .type = CRYPTO_ALG_TYPE_AEAD,
2968 .template_aead = {
2969 .setkey = aead_setkey,
2970 .setauthsize = aead_setauthsize,
2971 .encrypt = aead_encrypt,
2972 .decrypt = aead_decrypt,
2973 .givencrypt = aead_null_givencrypt,
2974 .geniv = "<built-in>",
2975 .ivsize = NULL_IV_SIZE,
2976 .maxauthsize = MD5_DIGEST_SIZE,
2977 },
2978 .class1_alg_type = 0,
2979 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2980 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2981 },
2982 {
2983 .name = "authenc(hmac(sha1),ecb(cipher_null))",
2984 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
2985 .blocksize = NULL_BLOCK_SIZE,
2986 .type = CRYPTO_ALG_TYPE_AEAD,
2987 .template_aead = {
2988 .setkey = aead_setkey,
2989 .setauthsize = aead_setauthsize,
2990 .encrypt = aead_encrypt,
2991 .decrypt = aead_decrypt,
2992 .givencrypt = aead_null_givencrypt,
2993 .geniv = "<built-in>",
2994 .ivsize = NULL_IV_SIZE,
2995 .maxauthsize = SHA1_DIGEST_SIZE,
2996 },
2997 .class1_alg_type = 0,
2998 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2999 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3000 },
3001 {
3002 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3003 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3004 .blocksize = NULL_BLOCK_SIZE,
3005 .type = CRYPTO_ALG_TYPE_AEAD,
3006 .template_aead = {
3007 .setkey = aead_setkey,
3008 .setauthsize = aead_setauthsize,
3009 .encrypt = aead_encrypt,
3010 .decrypt = aead_decrypt,
3011 .givencrypt = aead_null_givencrypt,
3012 .geniv = "<built-in>",
3013 .ivsize = NULL_IV_SIZE,
3014 .maxauthsize = SHA224_DIGEST_SIZE,
3015 },
3016 .class1_alg_type = 0,
3017 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3018 OP_ALG_AAI_HMAC_PRECOMP,
3019 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3020 },
3021 {
3022 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3023 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3024 .blocksize = NULL_BLOCK_SIZE,
3025 .type = CRYPTO_ALG_TYPE_AEAD,
3026 .template_aead = {
3027 .setkey = aead_setkey,
3028 .setauthsize = aead_setauthsize,
3029 .encrypt = aead_encrypt,
3030 .decrypt = aead_decrypt,
3031 .givencrypt = aead_null_givencrypt,
3032 .geniv = "<built-in>",
3033 .ivsize = NULL_IV_SIZE,
3034 .maxauthsize = SHA256_DIGEST_SIZE,
3035 },
3036 .class1_alg_type = 0,
3037 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3038 OP_ALG_AAI_HMAC_PRECOMP,
3039 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3040 },
3041 {
3042 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3043 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3044 .blocksize = NULL_BLOCK_SIZE,
3045 .type = CRYPTO_ALG_TYPE_AEAD,
3046 .template_aead = {
3047 .setkey = aead_setkey,
3048 .setauthsize = aead_setauthsize,
3049 .encrypt = aead_encrypt,
3050 .decrypt = aead_decrypt,
3051 .givencrypt = aead_null_givencrypt,
3052 .geniv = "<built-in>",
3053 .ivsize = NULL_IV_SIZE,
3054 .maxauthsize = SHA384_DIGEST_SIZE,
3055 },
3056 .class1_alg_type = 0,
3057 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3058 OP_ALG_AAI_HMAC_PRECOMP,
3059 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3060 },
3061 {
3062 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3063 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3064 .blocksize = NULL_BLOCK_SIZE,
3065 .type = CRYPTO_ALG_TYPE_AEAD,
3066 .template_aead = {
3067 .setkey = aead_setkey,
3068 .setauthsize = aead_setauthsize,
3069 .encrypt = aead_encrypt,
3070 .decrypt = aead_decrypt,
3071 .givencrypt = aead_null_givencrypt,
3072 .geniv = "<built-in>",
3073 .ivsize = NULL_IV_SIZE,
3074 .maxauthsize = SHA512_DIGEST_SIZE,
3075 },
3076 .class1_alg_type = 0,
3077 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3078 OP_ALG_AAI_HMAC_PRECOMP,
3079 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3080 },
3081 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003082 .name = "authenc(hmac(md5),cbc(aes))",
3083 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3084 .blocksize = AES_BLOCK_SIZE,
3085 .type = CRYPTO_ALG_TYPE_AEAD,
3086 .template_aead = {
3087 .setkey = aead_setkey,
3088 .setauthsize = aead_setauthsize,
3089 .encrypt = aead_encrypt,
3090 .decrypt = aead_decrypt,
3091 .givencrypt = aead_givencrypt,
3092 .geniv = "<built-in>",
3093 .ivsize = AES_BLOCK_SIZE,
3094 .maxauthsize = MD5_DIGEST_SIZE,
3095 },
3096 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3097 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3098 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3099 },
3100 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003101 .name = "authenc(hmac(sha1),cbc(aes))",
3102 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3103 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003104 .type = CRYPTO_ALG_TYPE_AEAD,
3105 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003106 .setkey = aead_setkey,
3107 .setauthsize = aead_setauthsize,
3108 .encrypt = aead_encrypt,
3109 .decrypt = aead_decrypt,
3110 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003111 .geniv = "<built-in>",
3112 .ivsize = AES_BLOCK_SIZE,
3113 .maxauthsize = SHA1_DIGEST_SIZE,
3114 },
3115 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3116 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3117 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3118 },
3119 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003120 .name = "authenc(hmac(sha224),cbc(aes))",
3121 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3122 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303123 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003124 .template_aead = {
3125 .setkey = aead_setkey,
3126 .setauthsize = aead_setauthsize,
3127 .encrypt = aead_encrypt,
3128 .decrypt = aead_decrypt,
3129 .givencrypt = aead_givencrypt,
3130 .geniv = "<built-in>",
3131 .ivsize = AES_BLOCK_SIZE,
3132 .maxauthsize = SHA224_DIGEST_SIZE,
3133 },
3134 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3135 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3136 OP_ALG_AAI_HMAC_PRECOMP,
3137 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3138 },
3139 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003140 .name = "authenc(hmac(sha256),cbc(aes))",
3141 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3142 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003143 .type = CRYPTO_ALG_TYPE_AEAD,
3144 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003145 .setkey = aead_setkey,
3146 .setauthsize = aead_setauthsize,
3147 .encrypt = aead_encrypt,
3148 .decrypt = aead_decrypt,
3149 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003150 .geniv = "<built-in>",
3151 .ivsize = AES_BLOCK_SIZE,
3152 .maxauthsize = SHA256_DIGEST_SIZE,
3153 },
3154 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3155 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3156 OP_ALG_AAI_HMAC_PRECOMP,
3157 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3158 },
3159 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003160 .name = "authenc(hmac(sha384),cbc(aes))",
3161 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3162 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303163 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003164 .template_aead = {
3165 .setkey = aead_setkey,
3166 .setauthsize = aead_setauthsize,
3167 .encrypt = aead_encrypt,
3168 .decrypt = aead_decrypt,
3169 .givencrypt = aead_givencrypt,
3170 .geniv = "<built-in>",
3171 .ivsize = AES_BLOCK_SIZE,
3172 .maxauthsize = SHA384_DIGEST_SIZE,
3173 },
3174 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3175 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3176 OP_ALG_AAI_HMAC_PRECOMP,
3177 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3178 },
3180 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003181 .name = "authenc(hmac(sha512),cbc(aes))",
3182 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3183 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003184 .type = CRYPTO_ALG_TYPE_AEAD,
3185 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003186 .setkey = aead_setkey,
3187 .setauthsize = aead_setauthsize,
3188 .encrypt = aead_encrypt,
3189 .decrypt = aead_decrypt,
3190 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003191 .geniv = "<built-in>",
3192 .ivsize = AES_BLOCK_SIZE,
3193 .maxauthsize = SHA512_DIGEST_SIZE,
3194 },
3195 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3196 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3197 OP_ALG_AAI_HMAC_PRECOMP,
3198 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3199 },
3200 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003201 .name = "authenc(hmac(md5),cbc(des3_ede))",
3202 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3203 .blocksize = DES3_EDE_BLOCK_SIZE,
3204 .type = CRYPTO_ALG_TYPE_AEAD,
3205 .template_aead = {
3206 .setkey = aead_setkey,
3207 .setauthsize = aead_setauthsize,
3208 .encrypt = aead_encrypt,
3209 .decrypt = aead_decrypt,
3210 .givencrypt = aead_givencrypt,
3211 .geniv = "<built-in>",
3212 .ivsize = DES3_EDE_BLOCK_SIZE,
3213 .maxauthsize = MD5_DIGEST_SIZE,
3214 },
3215 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3216 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3217 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3218 },
3219 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003220 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3221 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3222 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003223 .type = CRYPTO_ALG_TYPE_AEAD,
3224 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003225 .setkey = aead_setkey,
3226 .setauthsize = aead_setauthsize,
3227 .encrypt = aead_encrypt,
3228 .decrypt = aead_decrypt,
3229 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003230 .geniv = "<built-in>",
3231 .ivsize = DES3_EDE_BLOCK_SIZE,
3232 .maxauthsize = SHA1_DIGEST_SIZE,
3233 },
3234 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3235 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3236 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3237 },
3238 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003239 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3240 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3241 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303242 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003243 .template_aead = {
3244 .setkey = aead_setkey,
3245 .setauthsize = aead_setauthsize,
3246 .encrypt = aead_encrypt,
3247 .decrypt = aead_decrypt,
3248 .givencrypt = aead_givencrypt,
3249 .geniv = "<built-in>",
3250 .ivsize = DES3_EDE_BLOCK_SIZE,
3251 .maxauthsize = SHA224_DIGEST_SIZE,
3252 },
3253 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3254 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3255 OP_ALG_AAI_HMAC_PRECOMP,
3256 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3257 },
3258 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003259 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3260 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3261 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003262 .type = CRYPTO_ALG_TYPE_AEAD,
3263 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003264 .setkey = aead_setkey,
3265 .setauthsize = aead_setauthsize,
3266 .encrypt = aead_encrypt,
3267 .decrypt = aead_decrypt,
3268 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003269 .geniv = "<built-in>",
3270 .ivsize = DES3_EDE_BLOCK_SIZE,
3271 .maxauthsize = SHA256_DIGEST_SIZE,
3272 },
3273 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3274 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3275 OP_ALG_AAI_HMAC_PRECOMP,
3276 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3277 },
3278 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003279 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3280 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3281 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303282 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003283 .template_aead = {
3284 .setkey = aead_setkey,
3285 .setauthsize = aead_setauthsize,
3286 .encrypt = aead_encrypt,
3287 .decrypt = aead_decrypt,
3288 .givencrypt = aead_givencrypt,
3289 .geniv = "<built-in>",
3290 .ivsize = DES3_EDE_BLOCK_SIZE,
3291 .maxauthsize = SHA384_DIGEST_SIZE,
3292 },
3293 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3294 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3295 OP_ALG_AAI_HMAC_PRECOMP,
3296 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3297 },
3298 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003299 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3300 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3301 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003302 .type = CRYPTO_ALG_TYPE_AEAD,
3303 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003304 .setkey = aead_setkey,
3305 .setauthsize = aead_setauthsize,
3306 .encrypt = aead_encrypt,
3307 .decrypt = aead_decrypt,
3308 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003309 .geniv = "<built-in>",
3310 .ivsize = DES3_EDE_BLOCK_SIZE,
3311 .maxauthsize = SHA512_DIGEST_SIZE,
3312 },
3313 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3314 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3315 OP_ALG_AAI_HMAC_PRECOMP,
3316 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3317 },
3318 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003319 .name = "authenc(hmac(md5),cbc(des))",
3320 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3321 .blocksize = DES_BLOCK_SIZE,
3322 .type = CRYPTO_ALG_TYPE_AEAD,
3323 .template_aead = {
3324 .setkey = aead_setkey,
3325 .setauthsize = aead_setauthsize,
3326 .encrypt = aead_encrypt,
3327 .decrypt = aead_decrypt,
3328 .givencrypt = aead_givencrypt,
3329 .geniv = "<built-in>",
3330 .ivsize = DES_BLOCK_SIZE,
3331 .maxauthsize = MD5_DIGEST_SIZE,
3332 },
3333 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3334 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3335 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3336 },
3337 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003338 .name = "authenc(hmac(sha1),cbc(des))",
3339 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3340 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003341 .type = CRYPTO_ALG_TYPE_AEAD,
3342 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003343 .setkey = aead_setkey,
3344 .setauthsize = aead_setauthsize,
3345 .encrypt = aead_encrypt,
3346 .decrypt = aead_decrypt,
3347 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003348 .geniv = "<built-in>",
3349 .ivsize = DES_BLOCK_SIZE,
3350 .maxauthsize = SHA1_DIGEST_SIZE,
3351 },
3352 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3353 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3354 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3355 },
3356 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003357 .name = "authenc(hmac(sha224),cbc(des))",
3358 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3359 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303360 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003361 .template_aead = {
3362 .setkey = aead_setkey,
3363 .setauthsize = aead_setauthsize,
3364 .encrypt = aead_encrypt,
3365 .decrypt = aead_decrypt,
3366 .givencrypt = aead_givencrypt,
3367 .geniv = "<built-in>",
3368 .ivsize = DES_BLOCK_SIZE,
3369 .maxauthsize = SHA224_DIGEST_SIZE,
3370 },
3371 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3372 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3373 OP_ALG_AAI_HMAC_PRECOMP,
3374 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3375 },
3376 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003377 .name = "authenc(hmac(sha256),cbc(des))",
3378 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3379 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003380 .type = CRYPTO_ALG_TYPE_AEAD,
3381 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003382 .setkey = aead_setkey,
3383 .setauthsize = aead_setauthsize,
3384 .encrypt = aead_encrypt,
3385 .decrypt = aead_decrypt,
3386 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003387 .geniv = "<built-in>",
3388 .ivsize = DES_BLOCK_SIZE,
3389 .maxauthsize = SHA256_DIGEST_SIZE,
3390 },
3391 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3392 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3393 OP_ALG_AAI_HMAC_PRECOMP,
3394 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3395 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003396 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003397 .name = "authenc(hmac(sha384),cbc(des))",
3398 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3399 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303400 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003401 .template_aead = {
3402 .setkey = aead_setkey,
3403 .setauthsize = aead_setauthsize,
3404 .encrypt = aead_encrypt,
3405 .decrypt = aead_decrypt,
3406 .givencrypt = aead_givencrypt,
3407 .geniv = "<built-in>",
3408 .ivsize = DES_BLOCK_SIZE,
3409 .maxauthsize = SHA384_DIGEST_SIZE,
3410 },
3411 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3412 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3413 OP_ALG_AAI_HMAC_PRECOMP,
3414 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3415 },
3416 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003417 .name = "authenc(hmac(sha512),cbc(des))",
3418 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3419 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003420 .type = CRYPTO_ALG_TYPE_AEAD,
3421 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003422 .setkey = aead_setkey,
3423 .setauthsize = aead_setauthsize,
3424 .encrypt = aead_encrypt,
3425 .decrypt = aead_decrypt,
3426 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003427 .geniv = "<built-in>",
3428 .ivsize = DES_BLOCK_SIZE,
3429 .maxauthsize = SHA512_DIGEST_SIZE,
3430 },
3431 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3432 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3433 OP_ALG_AAI_HMAC_PRECOMP,
3434 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3435 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003436 {
3437 .name = "rfc4106(gcm(aes))",
3438 .driver_name = "rfc4106-gcm-aes-caam",
3439 .blocksize = 1,
3440 .type = CRYPTO_ALG_TYPE_AEAD,
3441 .template_aead = {
3442 .setkey = rfc4106_setkey,
3443 .setauthsize = rfc4106_setauthsize,
3444 .encrypt = aead_encrypt,
3445 .decrypt = aead_decrypt,
3446 .givencrypt = aead_givencrypt,
3447 .geniv = "<built-in>",
3448 .ivsize = 8,
3449 .maxauthsize = AES_BLOCK_SIZE,
3450 },
3451 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3452 },
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02003453 {
3454 .name = "rfc4543(gcm(aes))",
3455 .driver_name = "rfc4543-gcm-aes-caam",
3456 .blocksize = 1,
3457 .type = CRYPTO_ALG_TYPE_AEAD,
3458 .template_aead = {
3459 .setkey = rfc4543_setkey,
3460 .setauthsize = rfc4543_setauthsize,
3461 .encrypt = aead_encrypt,
3462 .decrypt = aead_decrypt,
3463 .givencrypt = aead_givencrypt,
3464 .geniv = "<built-in>",
3465 .ivsize = 8,
3466 .maxauthsize = AES_BLOCK_SIZE,
3467 },
3468 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3469 },
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03003470 /* Galois Counter Mode */
3471 {
3472 .name = "gcm(aes)",
3473 .driver_name = "gcm-aes-caam",
3474 .blocksize = 1,
3475 .type = CRYPTO_ALG_TYPE_AEAD,
3476 .template_aead = {
3477 .setkey = gcm_setkey,
3478 .setauthsize = gcm_setauthsize,
3479 .encrypt = aead_encrypt,
3480 .decrypt = aead_decrypt,
3481 .givencrypt = NULL,
3482 .geniv = "<built-in>",
3483 .ivsize = 12,
3484 .maxauthsize = AES_BLOCK_SIZE,
3485 },
3486 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3487 },
Yuan Kangacdca312011-07-15 11:21:42 +08003488 /* ablkcipher descriptor */
3489 {
3490 .name = "cbc(aes)",
3491 .driver_name = "cbc-aes-caam",
3492 .blocksize = AES_BLOCK_SIZE,
3493 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3494 .template_ablkcipher = {
3495 .setkey = ablkcipher_setkey,
3496 .encrypt = ablkcipher_encrypt,
3497 .decrypt = ablkcipher_decrypt,
3498 .geniv = "eseqiv",
3499 .min_keysize = AES_MIN_KEY_SIZE,
3500 .max_keysize = AES_MAX_KEY_SIZE,
3501 .ivsize = AES_BLOCK_SIZE,
3502 },
3503 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3504 },
3505 {
3506 .name = "cbc(des3_ede)",
3507 .driver_name = "cbc-3des-caam",
3508 .blocksize = DES3_EDE_BLOCK_SIZE,
3509 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3510 .template_ablkcipher = {
3511 .setkey = ablkcipher_setkey,
3512 .encrypt = ablkcipher_encrypt,
3513 .decrypt = ablkcipher_decrypt,
3514 .geniv = "eseqiv",
3515 .min_keysize = DES3_EDE_KEY_SIZE,
3516 .max_keysize = DES3_EDE_KEY_SIZE,
3517 .ivsize = DES3_EDE_BLOCK_SIZE,
3518 },
3519 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3520 },
3521 {
3522 .name = "cbc(des)",
3523 .driver_name = "cbc-des-caam",
3524 .blocksize = DES_BLOCK_SIZE,
3525 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3526 .template_ablkcipher = {
3527 .setkey = ablkcipher_setkey,
3528 .encrypt = ablkcipher_encrypt,
3529 .decrypt = ablkcipher_decrypt,
3530 .geniv = "eseqiv",
3531 .min_keysize = DES_KEY_SIZE,
3532 .max_keysize = DES_KEY_SIZE,
3533 .ivsize = DES_BLOCK_SIZE,
3534 },
3535 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3536 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003537};
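/*
 * Illustrative usage sketch (not part of this driver): once the table
 * above is registered, a CAAM-backed transform resolves through the
 * generic crypto API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_free_ablkcipher(tfm);
 *
 * The high cra_priority set in caam_alg_alloc() biases this lookup
 * toward the CAAM implementation over software providers.
 */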
3538
3539struct caam_crypto_alg {
3540 struct list_head entry;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003541 int class1_alg_type;
3542 int class2_alg_type;
3543 int alg_op;
3544 struct crypto_alg crypto_alg;
3545};
3546
3547static int caam_cra_init(struct crypto_tfm *tfm)
3548{
3549 struct crypto_alg *alg = tfm->__crt_alg;
3550 struct caam_crypto_alg *caam_alg =
3551 container_of(alg, struct caam_crypto_alg, crypto_alg);
3552 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003553
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303554 ctx->jrdev = caam_jr_alloc();
3555 if (IS_ERR(ctx->jrdev)) {
3556 pr_err("Job Ring Device allocation for transform failed\n");
3557 return PTR_ERR(ctx->jrdev);
3558 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003559
3560 /* copy descriptor header template value */
3561 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
3562 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
3563 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
3564
3565 return 0;
3566}
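/*
 * Each transform thus owns a job ring reference for its whole
 * lifetime; the matching caam_jr_free() is issued from caam_cra_exit()
 * below.
 */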
3567
3568static void caam_cra_exit(struct crypto_tfm *tfm)
3569{
3570 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3571
Yuan Kang1acebad2011-07-15 11:21:42 +08003572 if (ctx->sh_desc_enc_dma &&
3573 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
3574 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
3575 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
3576 if (ctx->sh_desc_dec_dma &&
3577 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
3578 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
3579 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
3580 if (ctx->sh_desc_givenc_dma &&
3581 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
3582 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
3583 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05003584 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02003585 if (ctx->key_dma &&
3586 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
3587 dma_unmap_single(ctx->jrdev, ctx->key_dma,
3588 ctx->enckeylen + ctx->split_key_pad_len,
3589 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303590
3591 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003592}
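/*
 * The dma_mapping_error() guards above keep the unmaps safe for
 * contexts whose shared descriptors or keys were never mapped.
 */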
3593
3594static void __exit caam_algapi_exit(void)
3595{
Kim Phillips8e8ec592011-03-13 16:54:26 +08003597 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003598
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303599 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08003600 return;
3601
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303602 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003603 crypto_unregister_alg(&t_alg->crypto_alg);
3604 list_del(&t_alg->entry);
3605 kfree(t_alg);
3606 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003607}
3608
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303609static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08003610 *template)
3611{
3612 struct caam_crypto_alg *t_alg;
3613 struct crypto_alg *alg;
3614
3615 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
3616 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303617 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003618 return ERR_PTR(-ENOMEM);
3619 }
3620
3621 alg = &t_alg->crypto_alg;
3622
3623 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3624 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3625 template->driver_name);
3626 alg->cra_module = THIS_MODULE;
3627 alg->cra_init = caam_cra_init;
3628 alg->cra_exit = caam_cra_exit;
3629 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003630 alg->cra_blocksize = template->blocksize;
3631 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003632 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003633 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3634 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08003635 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08003636 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3637 alg->cra_type = &crypto_ablkcipher_type;
3638 alg->cra_ablkcipher = template->template_ablkcipher;
3639 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08003640 case CRYPTO_ALG_TYPE_AEAD:
3641 alg->cra_type = &crypto_aead_type;
3642 alg->cra_aead = template->template_aead;
3643 break;
3644 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003645
3646 t_alg->class1_alg_type = template->class1_alg_type;
3647 t_alg->class2_alg_type = template->class2_alg_type;
3648 t_alg->alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003649
3650 return t_alg;
3651}
3652
3653static int __init caam_algapi_init(void)
3654{
Ruchika Gupta35af6402014-07-07 10:42:12 +05303655 struct device_node *dev_node;
3656 struct platform_device *pdev;
3657 struct device *ctrldev;
3658 void *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003659 int i = 0, err = 0;
3660
Ruchika Gupta35af6402014-07-07 10:42:12 +05303661 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3662 if (!dev_node) {
3663 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3664 if (!dev_node)
3665 return -ENODEV;
3666 }
3667
3668 pdev = of_find_device_by_node(dev_node);
3669 if (!pdev) {
3670 of_node_put(dev_node);
3671 return -ENODEV;
3672 }
3673
3674 ctrldev = &pdev->dev;
3675 priv = dev_get_drvdata(ctrldev);
3676 of_node_put(dev_node);
3677
3678 /*
3679 * If priv is NULL, it's probably because the caam driver wasn't
3680 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3681 */
3682 if (!priv)
3683 return -ENODEV;
 3684
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303686 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003687
3688 /* register crypto algorithms the device supports */
3689 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3690 /* TODO: check if h/w supports alg */
3691 struct caam_crypto_alg *t_alg;
3692
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303693 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003694 if (IS_ERR(t_alg)) {
3695 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303696 pr_warn("%s alg allocation failed\n",
3697 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003698 continue;
3699 }
3700
3701 err = crypto_register_alg(&t_alg->crypto_alg);
3702 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303703 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08003704 t_alg->crypto_alg.cra_driver_name);
3705 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02003706		} else {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303707			list_add_tail(&t_alg->entry, &alg_list);
 		}
Kim Phillips8e8ec592011-03-13 16:54:26 +08003708 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303709 if (!list_empty(&alg_list))
3710 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003711
3712 return err;
3713}
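/*
 * Note that the registration loop deliberately continues past
 * per-algorithm failures, so err reflects only the last attempt and a
 * partially populated alg_list can coexist with a non-zero return.
 */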
3714
3715module_init(caam_algapi_init);
3716module_exit(caam_algapi_exit);
3717
3718MODULE_LICENSE("GPL");
3719MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3720MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");