blob: 2dc85f8304bb541db8a229f2519f34ef8aff2ab2 [file] [log] [blame]
Kim Phillips8e8ec592011-03-13 16:54:26 +08001/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
Yuan Kang6ec47332012-06-22 19:48:43 -050040 * | (output length) |
Kim Phillips8e8ec592011-03-13 16:54:26 +080041 * | SEQ_IN_PTR |
42 * | (input buffer) |
Yuan Kang6ec47332012-06-22 19:48:43 -050043 * | (input length) |
Kim Phillips8e8ec592011-03-13 16:54:26 +080044 * ---------------------
45 */
46
47#include "compat.h"
48
49#include "regs.h"
50#include "intern.h"
51#include "desc_constr.h"
52#include "jr.h"
53#include "error.h"
Yuan Kanga299c832012-06-22 19:48:46 -050054#include "sg_sw_sec4.h"
Yuan Kang4c1ec1f2012-06-22 19:48:45 -050055#include "key_gen.h"
Kim Phillips8e8ec592011-03-13 16:54:26 +080056
57/*
58 * crypto alg
59 */
60#define CAAM_CRA_PRIORITY 3000
61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
63 SHA512_DIGEST_SIZE * 2)
64/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65#define CAAM_MAX_IV_LENGTH 16
66
Kim Phillips4427b1b2011-05-14 22:08:17 -050067/* length of descriptors text */
Yuan Kang1acebad2011-07-15 11:21:42 +080068#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
Horia Geanta4464a7d2014-03-14 17:46:49 +020069#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
70#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
Yuan Kang1acebad2011-07-15 11:21:42 +080071#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72
Horia Geantaae4a8252014-03-14 17:46:52 +020073#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
74#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
75#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
76
Tudor Ambarus3ef8d942014-10-23 16:11:23 +030077#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
78#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
79#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
80
Tudor Ambarusbac68f22014-10-23 16:14:03 +030081#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
82#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
83#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
84#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
85
Tudor Ambarus5d0429a2014-10-30 18:55:07 +020086#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
87#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
88#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
89#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
90
Yuan Kangacdca312011-07-15 11:21:42 +080091#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
92#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
93 20 * CAAM_CMD_SZ)
94#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
95 15 * CAAM_CMD_SZ)
96
Tudor Ambarus5d0429a2014-10-30 18:55:07 +020097#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
Yuan Kang1acebad2011-07-15 11:21:42 +080098 CAAM_MAX_KEY_SIZE)
99#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
Kim Phillips4427b1b2011-05-14 22:08:17 -0500100
Kim Phillips8e8ec592011-03-13 16:54:26 +0800101#ifdef DEBUG
102/* for print_hex_dumps with line references */
Kim Phillips8e8ec592011-03-13 16:54:26 +0800103#define debug(format, arg...) printk(format, arg)
104#else
105#define debug(format, arg...)
106#endif
Ruchika Guptacfc6f112013-10-25 12:01:03 +0530107static struct list_head alg_list;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800108
Yuan Kang1acebad2011-07-15 11:21:42 +0800109/* Set DK bit in class 1 operation if shared */
110static inline void append_dec_op1(u32 *desc, u32 type)
111{
112 u32 *jump_cmd, *uncond_jump_cmd;
113
Horia Geantaa60384d2014-07-11 15:46:58 +0300114 /* DK bit is valid only for AES */
115 if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
116 append_operation(desc, type | OP_ALG_AS_INITFINAL |
117 OP_ALG_DECRYPT);
118 return;
119 }
120
Yuan Kang1acebad2011-07-15 11:21:42 +0800121 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
122 append_operation(desc, type | OP_ALG_AS_INITFINAL |
123 OP_ALG_DECRYPT);
124 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
125 set_jump_tgt_here(desc, jump_cmd);
126 append_operation(desc, type | OP_ALG_AS_INITFINAL |
127 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
128 set_jump_tgt_here(desc, uncond_jump_cmd);
129}
130
131/*
Yuan Kang1acebad2011-07-15 11:21:42 +0800132 * For aead functions, read payload and write payload,
133 * both of which are specified in req->src and req->dst
134 */
135static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
136{
Horia Geantaae4a8252014-03-14 17:46:52 +0200137 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
Yuan Kang1acebad2011-07-15 11:21:42 +0800138 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
139 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
Yuan Kang1acebad2011-07-15 11:21:42 +0800140}
141
142/*
143 * For aead encrypt and decrypt, read iv for both classes
144 */
145static inline void aead_append_ld_iv(u32 *desc, int ivsize)
146{
147 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
148 LDST_CLASS_1_CCB | ivsize);
149 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
150}
151
152/*
Yuan Kangacdca312011-07-15 11:21:42 +0800153 * For ablkcipher encrypt and decrypt, read from req->src and
154 * write to req->dst
155 */
156static inline void ablkcipher_append_src_dst(u32 *desc)
157{
Kim Phillips70d793c2012-06-22 19:42:35 -0500158 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
159 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
160 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
161 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
162 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
Yuan Kangacdca312011-07-15 11:21:42 +0800163}
164
165/*
Yuan Kang1acebad2011-07-15 11:21:42 +0800166 * If all data, including src (with assoc and iv) or dst (with iv only) are
167 * contiguous
168 */
169#define GIV_SRC_CONTIG 1
170#define GIV_DST_CONTIG (1 << 1)
171
Kim Phillips8e8ec592011-03-13 16:54:26 +0800172/*
173 * per-session context
174 */
175struct caam_ctx {
176 struct device *jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +0800177 u32 sh_desc_enc[DESC_MAX_USED_LEN];
178 u32 sh_desc_dec[DESC_MAX_USED_LEN];
179 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
180 dma_addr_t sh_desc_enc_dma;
181 dma_addr_t sh_desc_dec_dma;
182 dma_addr_t sh_desc_givenc_dma;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800183 u32 class1_alg_type;
184 u32 class2_alg_type;
185 u32 alg_op;
Yuan Kang1acebad2011-07-15 11:21:42 +0800186 u8 key[CAAM_MAX_KEY_SIZE];
Yuan Kang885e9e22011-07-15 11:21:41 +0800187 dma_addr_t key_dma;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800188 unsigned int enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800189 unsigned int split_key_len;
190 unsigned int split_key_pad_len;
191 unsigned int authsize;
192};
193
Yuan Kang1acebad2011-07-15 11:21:42 +0800194static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
195 int keys_fit_inline)
196{
197 if (keys_fit_inline) {
198 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
199 ctx->split_key_len, CLASS_2 |
200 KEY_DEST_MDHA_SPLIT | KEY_ENC);
201 append_key_as_imm(desc, (void *)ctx->key +
202 ctx->split_key_pad_len, ctx->enckeylen,
203 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
204 } else {
205 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
206 KEY_DEST_MDHA_SPLIT | KEY_ENC);
207 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
208 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
209 }
210}
211
212static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
213 int keys_fit_inline)
214{
215 u32 *key_jump_cmd;
216
Kim Phillips61bb86b2012-07-13 17:49:28 -0500217 init_sh_desc(desc, HDR_SHARE_SERIAL);
Yuan Kang1acebad2011-07-15 11:21:42 +0800218
219 /* Skip if already shared */
220 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
221 JUMP_COND_SHRD);
222
223 append_key_aead(desc, ctx, keys_fit_inline);
224
225 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kang1acebad2011-07-15 11:21:42 +0800226}
227
Horia Geantaae4a8252014-03-14 17:46:52 +0200228static int aead_null_set_sh_desc(struct crypto_aead *aead)
229{
230 struct aead_tfm *tfm = &aead->base.crt_aead;
231 struct caam_ctx *ctx = crypto_aead_ctx(aead);
232 struct device *jrdev = ctx->jrdev;
233 bool keys_fit_inline = false;
234 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
235 u32 *desc;
236
237 /*
238 * Job Descriptor and Shared Descriptors
239 * must all fit into the 64-word Descriptor h/w Buffer
240 */
241 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
242 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
243 keys_fit_inline = true;
244
245 /* aead_encrypt shared descriptor */
246 desc = ctx->sh_desc_enc;
247
248 init_sh_desc(desc, HDR_SHARE_SERIAL);
249
250 /* Skip if already shared */
251 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
252 JUMP_COND_SHRD);
253 if (keys_fit_inline)
254 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
255 ctx->split_key_len, CLASS_2 |
256 KEY_DEST_MDHA_SPLIT | KEY_ENC);
257 else
258 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
259 KEY_DEST_MDHA_SPLIT | KEY_ENC);
260 set_jump_tgt_here(desc, key_jump_cmd);
261
262 /* cryptlen = seqoutlen - authsize */
263 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
264
265 /*
266 * NULL encryption; IV is zero
267 * assoclen = (assoclen + cryptlen) - cryptlen
268 */
269 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
270
271 /* read assoc before reading payload */
272 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
273 KEY_VLF);
274
275 /* Prepare to read and write cryptlen bytes */
276 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
277 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
278
279 /*
280 * MOVE_LEN opcode is not available in all SEC HW revisions,
281 * thus need to do some magic, i.e. self-patch the descriptor
282 * buffer.
283 */
284 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
285 MOVE_DEST_MATH3 |
286 (0x6 << MOVE_LEN_SHIFT));
287 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
288 MOVE_DEST_DESCBUF |
289 MOVE_WAITCOMP |
290 (0x8 << MOVE_LEN_SHIFT));
291
292 /* Class 2 operation */
293 append_operation(desc, ctx->class2_alg_type |
294 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
295
296 /* Read and write cryptlen bytes */
297 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
298
299 set_move_tgt_here(desc, read_move_cmd);
300 set_move_tgt_here(desc, write_move_cmd);
301 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
302 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
303 MOVE_AUX_LS);
304
305 /* Write ICV */
306 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
307 LDST_SRCDST_BYTE_CONTEXT);
308
309 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
310 desc_bytes(desc),
311 DMA_TO_DEVICE);
312 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
313 dev_err(jrdev, "unable to map shared descriptor\n");
314 return -ENOMEM;
315 }
316#ifdef DEBUG
317 print_hex_dump(KERN_ERR,
318 "aead null enc shdesc@"__stringify(__LINE__)": ",
319 DUMP_PREFIX_ADDRESS, 16, 4, desc,
320 desc_bytes(desc), 1);
321#endif
322
323 /*
324 * Job Descriptor and Shared Descriptors
325 * must all fit into the 64-word Descriptor h/w Buffer
326 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500327 keys_fit_inline = false;
Horia Geantaae4a8252014-03-14 17:46:52 +0200328 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
329 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
330 keys_fit_inline = true;
331
332 desc = ctx->sh_desc_dec;
333
334 /* aead_decrypt shared descriptor */
335 init_sh_desc(desc, HDR_SHARE_SERIAL);
336
337 /* Skip if already shared */
338 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
339 JUMP_COND_SHRD);
340 if (keys_fit_inline)
341 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
342 ctx->split_key_len, CLASS_2 |
343 KEY_DEST_MDHA_SPLIT | KEY_ENC);
344 else
345 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
346 KEY_DEST_MDHA_SPLIT | KEY_ENC);
347 set_jump_tgt_here(desc, key_jump_cmd);
348
349 /* Class 2 operation */
350 append_operation(desc, ctx->class2_alg_type |
351 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
352
353 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
354 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
355 ctx->authsize + tfm->ivsize);
356 /* assoclen = (assoclen + cryptlen) - cryptlen */
357 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
358 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
359
360 /* read assoc before reading payload */
361 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
362 KEY_VLF);
363
364 /* Prepare to read and write cryptlen bytes */
365 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
366 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
367
368 /*
369 * MOVE_LEN opcode is not available in all SEC HW revisions,
370 * thus need to do some magic, i.e. self-patch the descriptor
371 * buffer.
372 */
373 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
374 MOVE_DEST_MATH2 |
375 (0x6 << MOVE_LEN_SHIFT));
376 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
377 MOVE_DEST_DESCBUF |
378 MOVE_WAITCOMP |
379 (0x8 << MOVE_LEN_SHIFT));
380
381 /* Read and write cryptlen bytes */
382 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
383
384 /*
385 * Insert a NOP here, since we need at least 4 instructions between
386 * code patching the descriptor buffer and the location being patched.
387 */
388 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
389 set_jump_tgt_here(desc, jump_cmd);
390
391 set_move_tgt_here(desc, read_move_cmd);
392 set_move_tgt_here(desc, write_move_cmd);
393 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
394 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
395 MOVE_AUX_LS);
396 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
397
398 /* Load ICV */
399 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
400 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
401
402 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
403 desc_bytes(desc),
404 DMA_TO_DEVICE);
405 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
406 dev_err(jrdev, "unable to map shared descriptor\n");
407 return -ENOMEM;
408 }
409#ifdef DEBUG
410 print_hex_dump(KERN_ERR,
411 "aead null dec shdesc@"__stringify(__LINE__)": ",
412 DUMP_PREFIX_ADDRESS, 16, 4, desc,
413 desc_bytes(desc), 1);
414#endif
415
416 return 0;
417}
418
Yuan Kang1acebad2011-07-15 11:21:42 +0800419static int aead_set_sh_desc(struct crypto_aead *aead)
420{
421 struct aead_tfm *tfm = &aead->base.crt_aead;
422 struct caam_ctx *ctx = crypto_aead_ctx(aead);
423 struct device *jrdev = ctx->jrdev;
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800424 bool keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800425 u32 geniv, moveiv;
426 u32 *desc;
427
Horia Geantaae4a8252014-03-14 17:46:52 +0200428 if (!ctx->authsize)
Yuan Kang1acebad2011-07-15 11:21:42 +0800429 return 0;
430
Horia Geantaae4a8252014-03-14 17:46:52 +0200431 /* NULL encryption / decryption */
432 if (!ctx->enckeylen)
433 return aead_null_set_sh_desc(aead);
434
Yuan Kang1acebad2011-07-15 11:21:42 +0800435 /*
436 * Job Descriptor and Shared Descriptors
437 * must all fit into the 64-word Descriptor h/w Buffer
438 */
439 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
440 ctx->split_key_pad_len + ctx->enckeylen <=
441 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800442 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800443
444 /* aead_encrypt shared descriptor */
445 desc = ctx->sh_desc_enc;
446
447 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
448
449 /* Class 2 operation */
450 append_operation(desc, ctx->class2_alg_type |
451 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
452
453 /* cryptlen = seqoutlen - authsize */
454 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
455
456 /* assoclen + cryptlen = seqinlen - ivsize */
457 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
458
Horia Geanta4464a7d2014-03-14 17:46:49 +0200459 /* assoclen = (assoclen + cryptlen) - cryptlen */
Yuan Kang1acebad2011-07-15 11:21:42 +0800460 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
461
462 /* read assoc before reading payload */
463 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
464 KEY_VLF);
465 aead_append_ld_iv(desc, tfm->ivsize);
466
467 /* Class 1 operation */
468 append_operation(desc, ctx->class1_alg_type |
469 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
470
471 /* Read and write cryptlen bytes */
472 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
473 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
474 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
475
476 /* Write ICV */
477 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
478 LDST_SRCDST_BYTE_CONTEXT);
479
480 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
481 desc_bytes(desc),
482 DMA_TO_DEVICE);
483 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
484 dev_err(jrdev, "unable to map shared descriptor\n");
485 return -ENOMEM;
486 }
487#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300488 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800489 DUMP_PREFIX_ADDRESS, 16, 4, desc,
490 desc_bytes(desc), 1);
491#endif
492
493 /*
494 * Job Descriptor and Shared Descriptors
495 * must all fit into the 64-word Descriptor h/w Buffer
496 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500497 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800498 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
499 ctx->split_key_pad_len + ctx->enckeylen <=
500 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800501 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800502
Horia Geanta4464a7d2014-03-14 17:46:49 +0200503 /* aead_decrypt shared descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +0800504 desc = ctx->sh_desc_dec;
505
Horia Geanta4464a7d2014-03-14 17:46:49 +0200506 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
Yuan Kang1acebad2011-07-15 11:21:42 +0800507
508 /* Class 2 operation */
509 append_operation(desc, ctx->class2_alg_type |
510 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
511
Horia Geanta4464a7d2014-03-14 17:46:49 +0200512 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
Yuan Kang1acebad2011-07-15 11:21:42 +0800513 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Horia Geantaae4a8252014-03-14 17:46:52 +0200514 ctx->authsize + tfm->ivsize);
Yuan Kang1acebad2011-07-15 11:21:42 +0800515 /* assoclen = (assoclen + cryptlen) - cryptlen */
516 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
517 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
518
519 /* read assoc before reading payload */
520 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
521 KEY_VLF);
522
523 aead_append_ld_iv(desc, tfm->ivsize);
524
525 append_dec_op1(desc, ctx->class1_alg_type);
526
527 /* Read and write cryptlen bytes */
528 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
529 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
530 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
531
532 /* Load ICV */
533 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
534 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
Yuan Kang1acebad2011-07-15 11:21:42 +0800535
536 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
537 desc_bytes(desc),
538 DMA_TO_DEVICE);
539 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
540 dev_err(jrdev, "unable to map shared descriptor\n");
541 return -ENOMEM;
542 }
543#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300544 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800545 DUMP_PREFIX_ADDRESS, 16, 4, desc,
546 desc_bytes(desc), 1);
547#endif
548
549 /*
550 * Job Descriptor and Shared Descriptors
551 * must all fit into the 64-word Descriptor h/w Buffer
552 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500553 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800554 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
555 ctx->split_key_pad_len + ctx->enckeylen <=
556 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800557 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800558
559 /* aead_givencrypt shared descriptor */
560 desc = ctx->sh_desc_givenc;
561
562 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
563
564 /* Generate IV */
565 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
566 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
567 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
568 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
569 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
570 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
571 append_move(desc, MOVE_SRC_INFIFO |
572 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
573 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
574
575 /* Copy IV to class 1 context */
576 append_move(desc, MOVE_SRC_CLASS1CTX |
577 MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
578
579 /* Return to encryption */
580 append_operation(desc, ctx->class2_alg_type |
581 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
582
583 /* ivsize + cryptlen = seqoutlen - authsize */
584 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
585
586 /* assoclen = seqinlen - (ivsize + cryptlen) */
587 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
588
589 /* read assoc before reading payload */
590 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
591 KEY_VLF);
592
593 /* Copy iv from class 1 ctx to class 2 fifo*/
594 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
595 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
596 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
597 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
598 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
599 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
600
601 /* Class 1 operation */
602 append_operation(desc, ctx->class1_alg_type |
603 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
604
605 /* Will write ivsize + cryptlen */
606 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
607
608 /* Not need to reload iv */
609 append_seq_fifo_load(desc, tfm->ivsize,
610 FIFOLD_CLASS_SKIP);
611
612 /* Will read cryptlen */
613 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
614 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
615
616 /* Write ICV */
617 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
618 LDST_SRCDST_BYTE_CONTEXT);
619
620 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
621 desc_bytes(desc),
622 DMA_TO_DEVICE);
623 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
624 dev_err(jrdev, "unable to map shared descriptor\n");
625 return -ENOMEM;
626 }
627#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300628 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800629 DUMP_PREFIX_ADDRESS, 16, 4, desc,
630 desc_bytes(desc), 1);
631#endif
632
633 return 0;
634}
635
Yuan Kang0e479302011-07-15 11:21:41 +0800636static int aead_setauthsize(struct crypto_aead *authenc,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800637 unsigned int authsize)
638{
639 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
640
641 ctx->authsize = authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +0800642 aead_set_sh_desc(authenc);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800643
644 return 0;
645}
646
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300647static int gcm_set_sh_desc(struct crypto_aead *aead)
648{
649 struct aead_tfm *tfm = &aead->base.crt_aead;
650 struct caam_ctx *ctx = crypto_aead_ctx(aead);
651 struct device *jrdev = ctx->jrdev;
652 bool keys_fit_inline = false;
653 u32 *key_jump_cmd, *zero_payload_jump_cmd,
654 *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
655 u32 *desc;
656
657 if (!ctx->enckeylen || !ctx->authsize)
658 return 0;
659
660 /*
661 * AES GCM encrypt shared descriptor
662 * Job Descriptor and Shared Descriptor
663 * must fit into the 64-word Descriptor h/w Buffer
664 */
665 if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
666 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
667 keys_fit_inline = true;
668
669 desc = ctx->sh_desc_enc;
670
671 init_sh_desc(desc, HDR_SHARE_SERIAL);
672
673 /* skip key loading if they are loaded due to sharing */
674 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
675 JUMP_COND_SHRD | JUMP_COND_SELF);
676 if (keys_fit_inline)
677 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
678 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
679 else
680 append_key(desc, ctx->key_dma, ctx->enckeylen,
681 CLASS_1 | KEY_DEST_CLASS_REG);
682 set_jump_tgt_here(desc, key_jump_cmd);
683
684 /* class 1 operation */
685 append_operation(desc, ctx->class1_alg_type |
686 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
687
688 /* cryptlen = seqoutlen - authsize */
689 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
690
691 /* assoclen + cryptlen = seqinlen - ivsize */
692 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
693
694 /* assoclen = (assoclen + cryptlen) - cryptlen */
695 append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
696
697 /* if cryptlen is ZERO jump to zero-payload commands */
698 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
699 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
700 JUMP_COND_MATH_Z);
701 /* read IV */
702 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
703 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
704
705 /* if assoclen is ZERO, skip reading the assoc data */
706 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
707 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
708 JUMP_COND_MATH_Z);
709
710 /* read assoc data */
711 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
712 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
713 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
714
715 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
716
717 /* write encrypted data */
718 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
719
720 /* read payload data */
721 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
722 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
723
724 /* jump the zero-payload commands */
725 append_jump(desc, JUMP_TEST_ALL | 7);
726
727 /* zero-payload commands */
728 set_jump_tgt_here(desc, zero_payload_jump_cmd);
729
730 /* if assoclen is ZERO, jump to IV reading - is the only input data */
731 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
732 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
733 JUMP_COND_MATH_Z);
734 /* read IV */
735 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
736 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
737
738 /* read assoc data */
739 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
740 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
741
742 /* jump to ICV writing */
743 append_jump(desc, JUMP_TEST_ALL | 2);
744
745 /* read IV - is the only input data */
746 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
747 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
748 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
749 FIFOLD_TYPE_LAST1);
750
751 /* write ICV */
752 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
753 LDST_SRCDST_BYTE_CONTEXT);
754
755 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
756 desc_bytes(desc),
757 DMA_TO_DEVICE);
758 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
759 dev_err(jrdev, "unable to map shared descriptor\n");
760 return -ENOMEM;
761 }
762#ifdef DEBUG
763 print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
764 DUMP_PREFIX_ADDRESS, 16, 4, desc,
765 desc_bytes(desc), 1);
766#endif
767
768 /*
769 * Job Descriptor and Shared Descriptors
770 * must all fit into the 64-word Descriptor h/w Buffer
771 */
772 keys_fit_inline = false;
773 if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
774 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
775 keys_fit_inline = true;
776
777 desc = ctx->sh_desc_dec;
778
779 init_sh_desc(desc, HDR_SHARE_SERIAL);
780
781 /* skip key loading if they are loaded due to sharing */
782 key_jump_cmd = append_jump(desc, JUMP_JSL |
783 JUMP_TEST_ALL | JUMP_COND_SHRD |
784 JUMP_COND_SELF);
785 if (keys_fit_inline)
786 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
787 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
788 else
789 append_key(desc, ctx->key_dma, ctx->enckeylen,
790 CLASS_1 | KEY_DEST_CLASS_REG);
791 set_jump_tgt_here(desc, key_jump_cmd);
792
793 /* class 1 operation */
794 append_operation(desc, ctx->class1_alg_type |
795 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
796
797 /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
798 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
799 ctx->authsize + tfm->ivsize);
800
801 /* assoclen = (assoclen + cryptlen) - cryptlen */
802 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
803 append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
804
805 /* read IV */
806 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
807 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
808
809 /* jump to zero-payload command if cryptlen is zero */
810 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
811 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
812 JUMP_COND_MATH_Z);
813
814 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
815 /* if asoclen is ZERO, skip reading assoc data */
816 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
817 JUMP_COND_MATH_Z);
818 /* read assoc data */
819 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
820 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
821 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
822
823 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
824
825 /* store encrypted data */
826 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
827
828 /* read payload data */
829 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
830 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
831
832 /* jump the zero-payload commands */
833 append_jump(desc, JUMP_TEST_ALL | 4);
834
835 /* zero-payload command */
836 set_jump_tgt_here(desc, zero_payload_jump_cmd);
837
838 /* if assoclen is ZERO, jump to ICV reading */
839 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
840 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
841 JUMP_COND_MATH_Z);
842 /* read assoc data */
843 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
844 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
845 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
846
847 /* read ICV */
848 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
849 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
850
851 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
852 desc_bytes(desc),
853 DMA_TO_DEVICE);
854 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
855 dev_err(jrdev, "unable to map shared descriptor\n");
856 return -ENOMEM;
857 }
858#ifdef DEBUG
859 print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
860 DUMP_PREFIX_ADDRESS, 16, 4, desc,
861 desc_bytes(desc), 1);
862#endif
863
864 return 0;
865}
866
867static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
868{
869 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
870
871 ctx->authsize = authsize;
872 gcm_set_sh_desc(authenc);
873
874 return 0;
875}
876
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300877static int rfc4106_set_sh_desc(struct crypto_aead *aead)
878{
879 struct aead_tfm *tfm = &aead->base.crt_aead;
880 struct caam_ctx *ctx = crypto_aead_ctx(aead);
881 struct device *jrdev = ctx->jrdev;
882 bool keys_fit_inline = false;
883 u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
884 u32 *desc;
885 u32 geniv;
886
887 if (!ctx->enckeylen || !ctx->authsize)
888 return 0;
889
890 /*
891 * RFC4106 encrypt shared descriptor
892 * Job Descriptor and Shared Descriptor
893 * must fit into the 64-word Descriptor h/w Buffer
894 */
895 if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
896 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
897 keys_fit_inline = true;
898
899 desc = ctx->sh_desc_enc;
900
901 init_sh_desc(desc, HDR_SHARE_SERIAL);
902
903 /* Skip key loading if it is loaded due to sharing */
904 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
905 JUMP_COND_SHRD);
906 if (keys_fit_inline)
907 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
908 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
909 else
910 append_key(desc, ctx->key_dma, ctx->enckeylen,
911 CLASS_1 | KEY_DEST_CLASS_REG);
912 set_jump_tgt_here(desc, key_jump_cmd);
913
914 /* Class 1 operation */
915 append_operation(desc, ctx->class1_alg_type |
916 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
917
918 /* cryptlen = seqoutlen - authsize */
919 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
920 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
921
922 /* assoclen + cryptlen = seqinlen - ivsize */
923 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
924
925 /* assoclen = (assoclen + cryptlen) - cryptlen */
926 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
927
928 /* Read Salt */
929 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
930 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
931 /* Read AES-GCM-ESP IV */
932 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
933 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
934
935 /* Read assoc data */
936 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
937 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
938
939 /* Will read cryptlen bytes */
940 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
941
942 /* Write encrypted data */
943 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
944
945 /* Read payload data */
946 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
947 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
948
949 /* Write ICV */
950 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
951 LDST_SRCDST_BYTE_CONTEXT);
952
953 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
954 desc_bytes(desc),
955 DMA_TO_DEVICE);
956 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
957 dev_err(jrdev, "unable to map shared descriptor\n");
958 return -ENOMEM;
959 }
960#ifdef DEBUG
961 print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
962 DUMP_PREFIX_ADDRESS, 16, 4, desc,
963 desc_bytes(desc), 1);
964#endif
965
966 /*
967 * Job Descriptor and Shared Descriptors
968 * must all fit into the 64-word Descriptor h/w Buffer
969 */
970 keys_fit_inline = false;
971 if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
972 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
973 keys_fit_inline = true;
974
975 desc = ctx->sh_desc_dec;
976
977 init_sh_desc(desc, HDR_SHARE_SERIAL);
978
979 /* Skip key loading if it is loaded due to sharing */
980 key_jump_cmd = append_jump(desc, JUMP_JSL |
981 JUMP_TEST_ALL | JUMP_COND_SHRD);
982 if (keys_fit_inline)
983 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
984 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
985 else
986 append_key(desc, ctx->key_dma, ctx->enckeylen,
987 CLASS_1 | KEY_DEST_CLASS_REG);
988 set_jump_tgt_here(desc, key_jump_cmd);
989
990 /* Class 1 operation */
991 append_operation(desc, ctx->class1_alg_type |
992 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
993
994 /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
995 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
996 ctx->authsize + tfm->ivsize);
997
998 /* assoclen = (assoclen + cryptlen) - cryptlen */
999 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1000 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1001
1002 /* Will write cryptlen bytes */
1003 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1004
1005 /* Read Salt */
1006 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
1007 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
1008 /* Read AES-GCM-ESP IV */
1009 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
1010 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
1011
1012 /* Read assoc data */
1013 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1014 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1015
1016 /* Will read cryptlen bytes */
1017 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1018
1019 /* Store payload data */
1020 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1021
1022 /* Read encrypted data */
1023 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1024 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1025
1026 /* Read ICV */
1027 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1028 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1029
1030 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1031 desc_bytes(desc),
1032 DMA_TO_DEVICE);
1033 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1034 dev_err(jrdev, "unable to map shared descriptor\n");
1035 return -ENOMEM;
1036 }
1037#ifdef DEBUG
1038 print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1039 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1040 desc_bytes(desc), 1);
1041#endif
1042
1043 /*
1044 * Job Descriptor and Shared Descriptors
1045 * must all fit into the 64-word Descriptor h/w Buffer
1046 */
1047 keys_fit_inline = false;
1048 if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
1049 ctx->split_key_pad_len + ctx->enckeylen <=
1050 CAAM_DESC_BYTES_MAX)
1051 keys_fit_inline = true;
1052
1053 /* rfc4106_givencrypt shared descriptor */
1054 desc = ctx->sh_desc_givenc;
1055
1056 init_sh_desc(desc, HDR_SHARE_SERIAL);
1057
1058 /* Skip key loading if it is loaded due to sharing */
1059 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1060 JUMP_COND_SHRD);
1061 if (keys_fit_inline)
1062 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1063 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1064 else
1065 append_key(desc, ctx->key_dma, ctx->enckeylen,
1066 CLASS_1 | KEY_DEST_CLASS_REG);
1067 set_jump_tgt_here(desc, key_jump_cmd);
1068
1069 /* Generate IV */
1070 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1071 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1072 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1073 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1074 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1075 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1076 move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
1077 (tfm->ivsize << MOVE_LEN_SHIFT));
1078 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1079
1080 /* Copy generated IV to OFIFO */
1081 write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
1082 (tfm->ivsize << MOVE_LEN_SHIFT));
1083
1084 /* Class 1 operation */
1085 append_operation(desc, ctx->class1_alg_type |
1086 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1087
1088 /* ivsize + cryptlen = seqoutlen - authsize */
1089 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1090
1091 /* assoclen = seqinlen - (ivsize + cryptlen) */
1092 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1093
1094 /* Will write ivsize + cryptlen */
1095 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
1096
1097 /* Read Salt and generated IV */
1098 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
1099 FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
1100 /* Append Salt */
1101 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1102 set_move_tgt_here(desc, move_cmd);
1103 set_move_tgt_here(desc, write_iv_cmd);
1104 /* Blank commands. Will be overwritten by generated IV. */
1105 append_cmd(desc, 0x00000000);
1106 append_cmd(desc, 0x00000000);
1107 /* End of blank commands */
1108
1109 /* No need to reload iv */
1110 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
1111
1112 /* Read assoc data */
1113 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1114 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1115
1116 /* Will read cryptlen */
1117 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1118
1119 /* Store generated IV and encrypted data */
1120 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1121
1122 /* Read payload data */
1123 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1124 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1125
1126 /* Write ICV */
1127 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1128 LDST_SRCDST_BYTE_CONTEXT);
1129
1130 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1131 desc_bytes(desc),
1132 DMA_TO_DEVICE);
1133 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1134 dev_err(jrdev, "unable to map shared descriptor\n");
1135 return -ENOMEM;
1136 }
1137#ifdef DEBUG
1138 print_hex_dump(KERN_ERR,
1139 "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
1140 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1141 desc_bytes(desc), 1);
1142#endif
1143
1144 return 0;
1145}
1146
1147static int rfc4106_setauthsize(struct crypto_aead *authenc,
1148 unsigned int authsize)
1149{
1150 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1151
1152 ctx->authsize = authsize;
1153 rfc4106_set_sh_desc(authenc);
1154
1155 return 0;
1156}
1157
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001158static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1159{
1160 struct aead_tfm *tfm = &aead->base.crt_aead;
1161 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1162 struct device *jrdev = ctx->jrdev;
1163 bool keys_fit_inline = false;
1164 u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
1165 u32 *read_move_cmd, *write_move_cmd;
1166 u32 *desc;
1167 u32 geniv;
1168
1169 if (!ctx->enckeylen || !ctx->authsize)
1170 return 0;
1171
1172 /*
1173 * RFC4543 encrypt shared descriptor
1174 * Job Descriptor and Shared Descriptor
1175 * must fit into the 64-word Descriptor h/w Buffer
1176 */
1177 if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
1178 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1179 keys_fit_inline = true;
1180
1181 desc = ctx->sh_desc_enc;
1182
1183 init_sh_desc(desc, HDR_SHARE_SERIAL);
1184
1185 /* Skip key loading if it is loaded due to sharing */
1186 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1187 JUMP_COND_SHRD);
1188 if (keys_fit_inline)
1189 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1190 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1191 else
1192 append_key(desc, ctx->key_dma, ctx->enckeylen,
1193 CLASS_1 | KEY_DEST_CLASS_REG);
1194 set_jump_tgt_here(desc, key_jump_cmd);
1195
1196 /* Class 1 operation */
1197 append_operation(desc, ctx->class1_alg_type |
1198 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1199
1200 /* Load AES-GMAC ESP IV into Math1 register */
1201 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1202 LDST_CLASS_DECO | tfm->ivsize);
1203
1204 /* Wait the DMA transaction to finish */
1205 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1206 (1 << JUMP_OFFSET_SHIFT));
1207
1208 /* Overwrite blank immediate AES-GMAC ESP IV data */
1209 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1210 (tfm->ivsize << MOVE_LEN_SHIFT));
1211
1212 /* Overwrite blank immediate AAD data */
1213 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1214 (tfm->ivsize << MOVE_LEN_SHIFT));
1215
1216 /* cryptlen = seqoutlen - authsize */
1217 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1218
1219 /* assoclen = (seqinlen - ivsize) - cryptlen */
1220 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1221
1222 /* Read Salt and AES-GMAC ESP IV */
1223 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1224 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1225 /* Append Salt */
1226 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1227 set_move_tgt_here(desc, write_iv_cmd);
1228 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1229 append_cmd(desc, 0x00000000);
1230 append_cmd(desc, 0x00000000);
1231 /* End of blank commands */
1232
1233 /* Read assoc data */
1234 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1235 FIFOLD_TYPE_AAD);
1236
1237 /* Will read cryptlen bytes */
1238 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
1239
1240 /* Will write cryptlen bytes */
1241 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1242
1243 /*
1244 * MOVE_LEN opcode is not available in all SEC HW revisions,
1245 * thus need to do some magic, i.e. self-patch the descriptor
1246 * buffer.
1247 */
1248 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1249 (0x6 << MOVE_LEN_SHIFT));
1250 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1251 (0x8 << MOVE_LEN_SHIFT));
1252
1253 /* Authenticate AES-GMAC ESP IV */
1254 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1255 FIFOLD_TYPE_AAD | tfm->ivsize);
1256 set_move_tgt_here(desc, write_aad_cmd);
1257 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1258 append_cmd(desc, 0x00000000);
1259 append_cmd(desc, 0x00000000);
1260 /* End of blank commands */
1261
1262 /* Read and write cryptlen bytes */
1263 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1264
1265 set_move_tgt_here(desc, read_move_cmd);
1266 set_move_tgt_here(desc, write_move_cmd);
1267 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1268 /* Move payload data to OFIFO */
1269 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1270
1271 /* Write ICV */
1272 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1273 LDST_SRCDST_BYTE_CONTEXT);
1274
1275 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1276 desc_bytes(desc),
1277 DMA_TO_DEVICE);
1278 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1279 dev_err(jrdev, "unable to map shared descriptor\n");
1280 return -ENOMEM;
1281 }
1282#ifdef DEBUG
1283 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1284 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1285 desc_bytes(desc), 1);
1286#endif
1287
1288 /*
1289 * Job Descriptor and Shared Descriptors
1290 * must all fit into the 64-word Descriptor h/w Buffer
1291 */
1292 keys_fit_inline = false;
1293 if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
1294 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1295 keys_fit_inline = true;
1296
1297 desc = ctx->sh_desc_dec;
1298
1299 init_sh_desc(desc, HDR_SHARE_SERIAL);
1300
1301 /* Skip key loading if it is loaded due to sharing */
1302 key_jump_cmd = append_jump(desc, JUMP_JSL |
1303 JUMP_TEST_ALL | JUMP_COND_SHRD);
1304 if (keys_fit_inline)
1305 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1306 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1307 else
1308 append_key(desc, ctx->key_dma, ctx->enckeylen,
1309 CLASS_1 | KEY_DEST_CLASS_REG);
1310 set_jump_tgt_here(desc, key_jump_cmd);
1311
1312 /* Class 1 operation */
1313 append_operation(desc, ctx->class1_alg_type |
1314 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1315
1316 /* Load AES-GMAC ESP IV into Math1 register */
1317 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1318 LDST_CLASS_DECO | tfm->ivsize);
1319
1320 /* Wait the DMA transaction to finish */
1321 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1322 (1 << JUMP_OFFSET_SHIFT));
1323
1324 /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
1325 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
1326
1327 /* Overwrite blank immediate AES-GMAC ESP IV data */
1328 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1329 (tfm->ivsize << MOVE_LEN_SHIFT));
1330
1331 /* Overwrite blank immediate AAD data */
1332 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1333 (tfm->ivsize << MOVE_LEN_SHIFT));
1334
1335 /* assoclen = (assoclen + cryptlen) - cryptlen */
1336 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1337 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1338
1339 /*
1340 * MOVE_LEN opcode is not available in all SEC HW revisions,
1341 * thus need to do some magic, i.e. self-patch the descriptor
1342 * buffer.
1343 */
1344 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1345 (0x6 << MOVE_LEN_SHIFT));
1346 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1347 (0x8 << MOVE_LEN_SHIFT));
1348
1349 /* Read Salt and AES-GMAC ESP IV */
1350 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1351 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1352 /* Append Salt */
1353 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1354 set_move_tgt_here(desc, write_iv_cmd);
1355 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1356 append_cmd(desc, 0x00000000);
1357 append_cmd(desc, 0x00000000);
1358 /* End of blank commands */
1359
1360 /* Read assoc data */
1361 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1362 FIFOLD_TYPE_AAD);
1363
1364 /* Will read cryptlen bytes */
1365 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1366
1367 /* Will write cryptlen bytes */
1368 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
1369
1370 /* Authenticate AES-GMAC ESP IV */
1371 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1372 FIFOLD_TYPE_AAD | tfm->ivsize);
1373 set_move_tgt_here(desc, write_aad_cmd);
1374 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1375 append_cmd(desc, 0x00000000);
1376 append_cmd(desc, 0x00000000);
1377 /* End of blank commands */
1378
1379 /* Store payload data */
1380 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1381
1382 /* In-snoop cryptlen data */
1383 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1384 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1385
1386 set_move_tgt_here(desc, read_move_cmd);
1387 set_move_tgt_here(desc, write_move_cmd);
1388 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1389 /* Move payload data to OFIFO */
1390 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1391 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1392
1393 /* Read ICV */
1394 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1395 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1396
1397 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1398 desc_bytes(desc),
1399 DMA_TO_DEVICE);
1400 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1401 dev_err(jrdev, "unable to map shared descriptor\n");
1402 return -ENOMEM;
1403 }
1404#ifdef DEBUG
1405 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1406 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1407 desc_bytes(desc), 1);
1408#endif
1409
1410 /*
1411 * Job Descriptor and Shared Descriptors
1412 * must all fit into the 64-word Descriptor h/w Buffer
1413 */
1414 keys_fit_inline = false;
1415 if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
1416 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1417 keys_fit_inline = true;
1418
1419 /* rfc4543_givencrypt shared descriptor */
1420 desc = ctx->sh_desc_givenc;
1421
1422 init_sh_desc(desc, HDR_SHARE_SERIAL);
1423
1424 /* Skip key loading if it is loaded due to sharing */
1425 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1426 JUMP_COND_SHRD);
1427 if (keys_fit_inline)
1428 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1429 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1430 else
1431 append_key(desc, ctx->key_dma, ctx->enckeylen,
1432 CLASS_1 | KEY_DEST_CLASS_REG);
1433 set_jump_tgt_here(desc, key_jump_cmd);
1434
1435 /* Generate IV */
1436 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1437 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1438 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1439 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1440 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1441 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1442 /* Move generated IV to Math1 register */
1443 append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
1444 (tfm->ivsize << MOVE_LEN_SHIFT));
1445 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1446
1447 /* Overwrite blank immediate AES-GMAC IV data */
1448 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1449 (tfm->ivsize << MOVE_LEN_SHIFT));
1450
1451 /* Overwrite blank immediate AAD data */
1452 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1453 (tfm->ivsize << MOVE_LEN_SHIFT));
1454
1455 /* Copy generated IV to OFIFO */
1456 append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
1457 (tfm->ivsize << MOVE_LEN_SHIFT));
1458
1459 /* Class 1 operation */
1460 append_operation(desc, ctx->class1_alg_type |
1461 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1462
1463 /* ivsize + cryptlen = seqoutlen - authsize */
1464 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1465
1466 /* assoclen = seqinlen - (ivsize + cryptlen) */
1467 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1468
1469 /* Will write ivsize + cryptlen */
1470 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
1471
1472 /*
1473 * MOVE_LEN opcode is not available in all SEC HW revisions,
1474 * thus need to do some magic, i.e. self-patch the descriptor
1475 * buffer.
1476 */
1477 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1478 (0x6 << MOVE_LEN_SHIFT));
1479 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1480 (0x8 << MOVE_LEN_SHIFT));
1481
1482 /* Read Salt and AES-GMAC generated IV */
1483 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1484 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1485 /* Append Salt */
1486 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1487 set_move_tgt_here(desc, write_iv_cmd);
1488 /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
1489 append_cmd(desc, 0x00000000);
1490 append_cmd(desc, 0x00000000);
1491 /* End of blank commands */
1492
1493 /* No need to reload iv */
1494 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
1495
1496 /* Read assoc data */
1497 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1498 FIFOLD_TYPE_AAD);
1499
1500 /* Will read cryptlen */
1501 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1502
1503 /* Authenticate AES-GMAC IV */
1504 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1505 FIFOLD_TYPE_AAD | tfm->ivsize);
1506 set_move_tgt_here(desc, write_aad_cmd);
1507 /* Blank commands. Will be overwritten by AES-GMAC IV. */
1508 append_cmd(desc, 0x00000000);
1509 append_cmd(desc, 0x00000000);
1510 /* End of blank commands */
1511
1512 /* Read and write cryptlen bytes */
1513 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1514
1515 set_move_tgt_here(desc, read_move_cmd);
1516 set_move_tgt_here(desc, write_move_cmd);
1517 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1518 /* Move payload data to OFIFO */
1519 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1520
1521 /* Write ICV */
1522 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1523 LDST_SRCDST_BYTE_CONTEXT);
1524
1525 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1526 desc_bytes(desc),
1527 DMA_TO_DEVICE);
1528 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1529 dev_err(jrdev, "unable to map shared descriptor\n");
1530 return -ENOMEM;
1531 }
1532#ifdef DEBUG
1533 print_hex_dump(KERN_ERR,
1534 "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
1535 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1536 desc_bytes(desc), 1);
1537#endif
1538
1539 return 0;
1540}
1541
1542static int rfc4543_setauthsize(struct crypto_aead *authenc,
1543 unsigned int authsize)
1544{
1545 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1546
1547 ctx->authsize = authsize;
1548 rfc4543_set_sh_desc(authenc);
1549
1550 return 0;
1551}
1552
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001553static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1554 u32 authkeylen)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001555{
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001556 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1557 ctx->split_key_pad_len, key_in, authkeylen,
1558 ctx->alg_op);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001559}
1560
Yuan Kang0e479302011-07-15 11:21:41 +08001561static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001562 const u8 *key, unsigned int keylen)
1563{
1564 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1565 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1566 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1567 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001568 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001569 int ret = 0;
1570
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001571 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001572 goto badkey;
1573
1574 /* Pick class 2 key length from algorithm submask */
1575 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1576 OP_ALG_ALGSEL_SHIFT] * 2;
1577 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1578
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001579 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1580 goto badkey;
1581
Kim Phillips8e8ec592011-03-13 16:54:26 +08001582#ifdef DEBUG
1583 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001584 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1585 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001586 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1587 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001588 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1590#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001591
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001592 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001593 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001594 goto badkey;
1595 }
1596
1597 /* postpend encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001598 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001599
Yuan Kang885e9e22011-07-15 11:21:41 +08001600 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001601 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001602 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001603 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001604 return -ENOMEM;
1605 }
1606#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001607 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001608 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001609 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001610#endif
1611
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001612 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001613
Yuan Kang1acebad2011-07-15 11:21:42 +08001614 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001615 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001616 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001617 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001618 }
1619
1620 return ret;
1621badkey:
1622 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1623 return -EINVAL;
1624}
1625
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001626static int gcm_setkey(struct crypto_aead *aead,
1627 const u8 *key, unsigned int keylen)
1628{
1629 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1630 struct device *jrdev = ctx->jrdev;
1631 int ret = 0;
1632
1633#ifdef DEBUG
1634 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1635 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1636#endif
1637
1638 memcpy(ctx->key, key, keylen);
1639 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1640 DMA_TO_DEVICE);
1641 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1642 dev_err(jrdev, "unable to map key i/o memory\n");
1643 return -ENOMEM;
1644 }
1645 ctx->enckeylen = keylen;
1646
1647 ret = gcm_set_sh_desc(aead);
1648 if (ret) {
1649 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1650 DMA_TO_DEVICE);
1651 }
1652
1653 return ret;
1654}
1655
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001656static int rfc4106_setkey(struct crypto_aead *aead,
1657 const u8 *key, unsigned int keylen)
1658{
1659 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1660 struct device *jrdev = ctx->jrdev;
1661 int ret = 0;
1662
1663 if (keylen < 4)
1664 return -EINVAL;
1665
1666#ifdef DEBUG
1667 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1668 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1669#endif
1670
1671 memcpy(ctx->key, key, keylen);
1672
1673 /*
1674 * The last four bytes of the key material are used as the salt value
1675 * in the nonce. Update the AES key length.
1676 */
1677 ctx->enckeylen = keylen - 4;
1678
1679 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1680 DMA_TO_DEVICE);
1681 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1682 dev_err(jrdev, "unable to map key i/o memory\n");
1683 return -ENOMEM;
1684 }
1685
1686 ret = rfc4106_set_sh_desc(aead);
1687 if (ret) {
1688 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1689 DMA_TO_DEVICE);
1690 }
1691
1692 return ret;
1693}
1694
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001695static int rfc4543_setkey(struct crypto_aead *aead,
1696 const u8 *key, unsigned int keylen)
1697{
1698 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1699 struct device *jrdev = ctx->jrdev;
1700 int ret = 0;
1701
1702 if (keylen < 4)
1703 return -EINVAL;
1704
1705#ifdef DEBUG
1706 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1707 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1708#endif
1709
1710 memcpy(ctx->key, key, keylen);
1711
1712 /*
1713 * The last four bytes of the key material are used as the salt value
1714 * in the nonce. Update the AES key length.
1715 */
1716 ctx->enckeylen = keylen - 4;
1717
1718 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1719 DMA_TO_DEVICE);
1720 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1721 dev_err(jrdev, "unable to map key i/o memory\n");
1722 return -ENOMEM;
1723 }
1724
1725 ret = rfc4543_set_sh_desc(aead);
1726 if (ret) {
1727 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1728 DMA_TO_DEVICE);
1729 }
1730
1731 return ret;
1732}
1733
Yuan Kangacdca312011-07-15 11:21:42 +08001734static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1735 const u8 *key, unsigned int keylen)
1736{
1737 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1738 struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
1739 struct device *jrdev = ctx->jrdev;
1740 int ret = 0;
Horia Geanta4464a7d2014-03-14 17:46:49 +02001741 u32 *key_jump_cmd;
Yuan Kangacdca312011-07-15 11:21:42 +08001742 u32 *desc;
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001743 u32 ctx1_iv_off = 0;
1744 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1745 OP_ALG_AAI_CTR_MOD128);
Yuan Kangacdca312011-07-15 11:21:42 +08001746
1747#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001748 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001749 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1750#endif
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001751 /*
1752 * AES-CTR needs to load IV in CONTEXT1 reg
1753 * at an offset of 128bits (16bytes)
1754 * CONTEXT1[255:128] = IV
1755 */
1756 if (ctr_mode)
1757 ctx1_iv_off = 16;
Yuan Kangacdca312011-07-15 11:21:42 +08001758
1759 memcpy(ctx->key, key, keylen);
1760 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1761 DMA_TO_DEVICE);
1762 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1763 dev_err(jrdev, "unable to map key i/o memory\n");
1764 return -ENOMEM;
1765 }
1766 ctx->enckeylen = keylen;
1767
1768 /* ablkcipher_encrypt shared descriptor */
1769 desc = ctx->sh_desc_enc;
Kim Phillips61bb86b2012-07-13 17:49:28 -05001770 init_sh_desc(desc, HDR_SHARE_SERIAL);
Yuan Kangacdca312011-07-15 11:21:42 +08001771 /* Skip if already shared */
1772 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1773 JUMP_COND_SHRD);
1774
1775 /* Load class1 key only */
1776 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1777 ctx->enckeylen, CLASS_1 |
1778 KEY_DEST_CLASS_REG);
1779
1780 set_jump_tgt_here(desc, key_jump_cmd);
1781
Yuan Kangacdca312011-07-15 11:21:42 +08001782 /* Load iv */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001783 append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1784 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001785
1786 /* Load operation */
1787 append_operation(desc, ctx->class1_alg_type |
1788 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1789
1790 /* Perform operation */
1791 ablkcipher_append_src_dst(desc);
1792
1793 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1794 desc_bytes(desc),
1795 DMA_TO_DEVICE);
1796 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1797 dev_err(jrdev, "unable to map shared descriptor\n");
1798 return -ENOMEM;
1799 }
1800#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001801 print_hex_dump(KERN_ERR,
1802 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001803 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1804 desc_bytes(desc), 1);
1805#endif
1806 /* ablkcipher_decrypt shared descriptor */
1807 desc = ctx->sh_desc_dec;
1808
Kim Phillips61bb86b2012-07-13 17:49:28 -05001809 init_sh_desc(desc, HDR_SHARE_SERIAL);
Yuan Kangacdca312011-07-15 11:21:42 +08001810 /* Skip if already shared */
1811 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1812 JUMP_COND_SHRD);
1813
1814 /* Load class1 key only */
1815 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1816 ctx->enckeylen, CLASS_1 |
1817 KEY_DEST_CLASS_REG);
1818
Yuan Kangacdca312011-07-15 11:21:42 +08001819 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kangacdca312011-07-15 11:21:42 +08001820
1821 /* load IV */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001822 append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1823 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001824
1825 /* Choose operation */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001826 if (ctr_mode)
1827 append_operation(desc, ctx->class1_alg_type |
1828 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1829 else
1830 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kangacdca312011-07-15 11:21:42 +08001831
1832 /* Perform operation */
1833 ablkcipher_append_src_dst(desc);
1834
Yuan Kangacdca312011-07-15 11:21:42 +08001835 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1836 desc_bytes(desc),
1837 DMA_TO_DEVICE);
Horia Geanta71c65f72014-07-11 15:34:48 +03001838 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
Yuan Kangacdca312011-07-15 11:21:42 +08001839 dev_err(jrdev, "unable to map shared descriptor\n");
1840 return -ENOMEM;
1841 }
1842
1843#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001844 print_hex_dump(KERN_ERR,
1845 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001846 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1847 desc_bytes(desc), 1);
1848#endif
1849
1850 return ret;
1851}
1852
Kim Phillips8e8ec592011-03-13 16:54:26 +08001853/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001854 * aead_edesc - s/w-extended aead descriptor
1855 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001856 * @assoc_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001857 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001858 * @src_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001859 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001860 * @dst_chained: if destination is chained
Yuan Kang1acebad2011-07-15 11:21:42 +08001861 * @iv_dma: dma address of iv for checking continuity and link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001862 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -05001863 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1864 * @sec4_sg_dma: bus physical mapped address of h/w link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001865 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1866 */
Yuan Kang0e479302011-07-15 11:21:41 +08001867struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001868 int assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001869 bool assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001870 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001871 bool src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001872 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001873 bool dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001874 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001875 int sec4_sg_bytes;
1876 dma_addr_t sec4_sg_dma;
1877 struct sec4_sg_entry *sec4_sg;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001878 u32 hw_desc[0];
1879};
1880
Yuan Kangacdca312011-07-15 11:21:42 +08001881/*
1882 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1883 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001884 * @src_chained: if source is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001885 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001886 * @dst_chained: if destination is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001887 * @iv_dma: dma address of iv for checking continuity and link table
1888 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -05001889 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1890 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +08001891 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1892 */
1893struct ablkcipher_edesc {
1894 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001895 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001896 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001897 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001898 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001899 int sec4_sg_bytes;
1900 dma_addr_t sec4_sg_dma;
1901 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001902 u32 hw_desc[0];
1903};
1904
Yuan Kang1acebad2011-07-15 11:21:42 +08001905static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -05001906 struct scatterlist *dst, int src_nents,
1907 bool src_chained, int dst_nents, bool dst_chained,
Yuan Kanga299c832012-06-22 19:48:46 -05001908 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1909 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001910{
Yuan Kang643b39b2012-06-22 19:48:49 -05001911 if (dst != src) {
1912 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1913 src_chained);
1914 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1915 dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001916 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001917 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1918 DMA_BIDIRECTIONAL, src_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001919 }
1920
Yuan Kang1acebad2011-07-15 11:21:42 +08001921 if (iv_dma)
1922 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -05001923 if (sec4_sg_bytes)
1924 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001925 DMA_TO_DEVICE);
1926}
1927
Yuan Kang1acebad2011-07-15 11:21:42 +08001928static void aead_unmap(struct device *dev,
1929 struct aead_edesc *edesc,
1930 struct aead_request *req)
1931{
1932 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1933 int ivsize = crypto_aead_ivsize(aead);
1934
Yuan Kang643b39b2012-06-22 19:48:49 -05001935 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
1936 DMA_TO_DEVICE, edesc->assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001937
1938 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001939 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1940 edesc->dst_chained, edesc->iv_dma, ivsize,
1941 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kang1acebad2011-07-15 11:21:42 +08001942}
1943
Yuan Kangacdca312011-07-15 11:21:42 +08001944static void ablkcipher_unmap(struct device *dev,
1945 struct ablkcipher_edesc *edesc,
1946 struct ablkcipher_request *req)
1947{
1948 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1949 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1950
1951 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001952 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1953 edesc->dst_chained, edesc->iv_dma, ivsize,
1954 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +08001955}
1956
Yuan Kang0e479302011-07-15 11:21:41 +08001957static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001958 void *context)
1959{
Yuan Kang0e479302011-07-15 11:21:41 +08001960 struct aead_request *req = context;
1961 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001962#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001963 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001964 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08001965 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001966
1967 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1968#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001969
Yuan Kang0e479302011-07-15 11:21:41 +08001970 edesc = (struct aead_edesc *)((char *)desc -
1971 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001972
Marek Vasutfa9659c2014-04-24 20:05:12 +02001973 if (err)
1974 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001975
Yuan Kang0e479302011-07-15 11:21:41 +08001976 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001977
1978#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001979 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001980 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1981 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001982 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001983 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001984 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001985 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001986 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1987 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001988 ctx->authsize + 4, 1);
1989#endif
1990
1991 kfree(edesc);
1992
Yuan Kang0e479302011-07-15 11:21:41 +08001993 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001994}
1995
Yuan Kang0e479302011-07-15 11:21:41 +08001996static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001997 void *context)
1998{
Yuan Kang0e479302011-07-15 11:21:41 +08001999 struct aead_request *req = context;
2000 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002001#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08002002 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002003 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08002004 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002005
2006 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2007#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002008
Yuan Kang0e479302011-07-15 11:21:41 +08002009 edesc = (struct aead_edesc *)((char *)desc -
2010 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08002011
Yuan Kang1acebad2011-07-15 11:21:42 +08002012#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002013 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002014 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
2015 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002016 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002017 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +02002018 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002019#endif
2020
Marek Vasutfa9659c2014-04-24 20:05:12 +02002021 if (err)
2022 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002023
Yuan Kang0e479302011-07-15 11:21:41 +08002024 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002025
2026 /*
2027 * verify hw auth check passed else return -EBADMSG
2028 */
2029 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2030 err = -EBADMSG;
2031
2032#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002033 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002034 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +08002035 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
2036 sizeof(struct iphdr) + req->assoclen +
2037 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +08002038 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -05002039 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +08002040 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +03002041 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002042 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
2043 sg->length + ctx->authsize + 16, 1);
2044 }
2045#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002046
Kim Phillips8e8ec592011-03-13 16:54:26 +08002047 kfree(edesc);
2048
Yuan Kang0e479302011-07-15 11:21:41 +08002049 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002050}
2051
Yuan Kangacdca312011-07-15 11:21:42 +08002052static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2053 void *context)
2054{
2055 struct ablkcipher_request *req = context;
2056 struct ablkcipher_edesc *edesc;
2057#ifdef DEBUG
2058 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2059 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2060
2061 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2062#endif
2063
2064 edesc = (struct ablkcipher_edesc *)((char *)desc -
2065 offsetof(struct ablkcipher_edesc, hw_desc));
2066
Marek Vasutfa9659c2014-04-24 20:05:12 +02002067 if (err)
2068 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002069
2070#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002071 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002072 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2073 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002074 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002075 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2076 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2077#endif
2078
2079 ablkcipher_unmap(jrdev, edesc, req);
2080 kfree(edesc);
2081
2082 ablkcipher_request_complete(req, err);
2083}
2084
2085static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2086 void *context)
2087{
2088 struct ablkcipher_request *req = context;
2089 struct ablkcipher_edesc *edesc;
2090#ifdef DEBUG
2091 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2092 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2093
2094 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2095#endif
2096
2097 edesc = (struct ablkcipher_edesc *)((char *)desc -
2098 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02002099 if (err)
2100 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002101
2102#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002103 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002104 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2105 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002106 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002107 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2108 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2109#endif
2110
2111 ablkcipher_unmap(jrdev, edesc, req);
2112 kfree(edesc);
2113
2114 ablkcipher_request_complete(req, err);
2115}
2116
Kim Phillips8e8ec592011-03-13 16:54:26 +08002117/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002118 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002119 */
Yuan Kang1acebad2011-07-15 11:21:42 +08002120static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2121 struct aead_edesc *edesc,
2122 struct aead_request *req,
2123 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002124{
Yuan Kang0e479302011-07-15 11:21:41 +08002125 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002126 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002127 int ivsize = crypto_aead_ivsize(aead);
2128 int authsize = ctx->authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +08002129 u32 *desc = edesc->hw_desc;
2130 u32 out_options = 0, in_options;
2131 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002132 int len, sec4_sg_index = 0;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002133 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002134
Yuan Kang1acebad2011-07-15 11:21:42 +08002135#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08002136 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08002137 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002138 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002139 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2140 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002141 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002142 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002143 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002144 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002145 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad2011-07-15 11:21:42 +08002146 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002147 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002148 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2149 desc_bytes(sh_desc), 1);
2150#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002151
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002152 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2153 OP_ALG_ALGSEL_AES) &&
2154 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2155 is_gcm = true;
2156
Yuan Kang1acebad2011-07-15 11:21:42 +08002157 len = desc_len(sh_desc);
2158 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2159
2160 if (all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002161 if (is_gcm)
2162 src_dma = edesc->iv_dma;
2163 else
2164 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002165 in_options = 0;
2166 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002167 src_dma = edesc->sec4_sg_dma;
2168 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2169 (edesc->src_nents ? : 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002170 in_options = LDST_SGF;
2171 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002172
2173 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2174 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002175
Yuan Kang1acebad2011-07-15 11:21:42 +08002176 if (likely(req->src == req->dst)) {
2177 if (all_contig) {
2178 dst_dma = sg_dma_address(req->src);
2179 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002180 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08002181 ((edesc->assoc_nents ? : 1) + 1);
2182 out_options = LDST_SGF;
2183 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002184 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002185 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08002186 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002187 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002188 dst_dma = edesc->sec4_sg_dma +
2189 sec4_sg_index *
2190 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002191 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002192 }
2193 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002194 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02002195 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2196 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002197 else
Yuan Kang1acebad2011-07-15 11:21:42 +08002198 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2199 out_options);
2200}
2201
2202/*
2203 * Fill in aead givencrypt job descriptor
2204 */
2205static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2206 struct aead_edesc *edesc,
2207 struct aead_request *req,
2208 int contig)
2209{
2210 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2211 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2212 int ivsize = crypto_aead_ivsize(aead);
2213 int authsize = ctx->authsize;
2214 u32 *desc = edesc->hw_desc;
2215 u32 out_options = 0, in_options;
2216 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002217 int len, sec4_sg_index = 0;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002218 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002219
2220#ifdef DEBUG
Yuan Kang1acebad2011-07-15 11:21:42 +08002221 debug("assoclen %d cryptlen %d authsize %d\n",
2222 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002223 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002224 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2225 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002226 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002227 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002228 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002229 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2230 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002231 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002232 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2233 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002234#endif
2235
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002236 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2237 OP_ALG_ALGSEL_AES) &&
2238 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2239 is_gcm = true;
2240
Yuan Kang1acebad2011-07-15 11:21:42 +08002241 len = desc_len(sh_desc);
2242 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2243
2244 if (contig & GIV_SRC_CONTIG) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002245 if (is_gcm)
2246 src_dma = edesc->iv_dma;
2247 else
2248 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002249 in_options = 0;
2250 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002251 src_dma = edesc->sec4_sg_dma;
2252 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002253 in_options = LDST_SGF;
2254 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002255 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2256 in_options);
Yuan Kang1acebad2011-07-15 11:21:42 +08002257
2258 if (contig & GIV_DST_CONTIG) {
2259 dst_dma = edesc->iv_dma;
2260 } else {
2261 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05002262 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002263 (edesc->assoc_nents +
2264 (is_gcm ? 1 + edesc->src_nents : 0));
Yuan Kang1acebad2011-07-15 11:21:42 +08002265 out_options = LDST_SGF;
2266 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002267 dst_dma = edesc->sec4_sg_dma +
2268 sec4_sg_index *
2269 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002270 out_options = LDST_SGF;
2271 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002272 }
2273
Horia Geantabbf9c892013-11-28 15:11:16 +02002274 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2275 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002276}
2277
2278/*
Yuan Kangacdca312011-07-15 11:21:42 +08002279 * Fill in ablkcipher job descriptor
2280 */
2281static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2282 struct ablkcipher_edesc *edesc,
2283 struct ablkcipher_request *req,
2284 bool iv_contig)
2285{
2286 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2287 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2288 u32 *desc = edesc->hw_desc;
2289 u32 out_options = 0, in_options;
2290 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002291 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002292
2293#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002294 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002295 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2296 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002297 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002298 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2299 edesc->src_nents ? 100 : req->nbytes, 1);
2300#endif
2301
2302 len = desc_len(sh_desc);
2303 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2304
2305 if (iv_contig) {
2306 src_dma = edesc->iv_dma;
2307 in_options = 0;
2308 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002309 src_dma = edesc->sec4_sg_dma;
2310 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002311 in_options = LDST_SGF;
2312 }
2313 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2314
2315 if (likely(req->src == req->dst)) {
2316 if (!edesc->src_nents && iv_contig) {
2317 dst_dma = sg_dma_address(req->src);
2318 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002319 dst_dma = edesc->sec4_sg_dma +
2320 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002321 out_options = LDST_SGF;
2322 }
2323 } else {
2324 if (!edesc->dst_nents) {
2325 dst_dma = sg_dma_address(req->dst);
2326 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002327 dst_dma = edesc->sec4_sg_dma +
2328 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002329 out_options = LDST_SGF;
2330 }
2331 }
2332 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2333}
2334
2335/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002336 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002337 */
Yuan Kang0e479302011-07-15 11:21:41 +08002338static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
Horia Geantabbf9c892013-11-28 15:11:16 +02002339 int desc_bytes, bool *all_contig_ptr,
2340 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002341{
Yuan Kang0e479302011-07-15 11:21:41 +08002342 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002343 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2344 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002345 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2346 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2347 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002348 struct aead_edesc *edesc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002349 dma_addr_t iv_dma = 0;
2350 int sgc;
2351 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05002352 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad2011-07-15 11:21:42 +08002353 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05002354 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02002355 unsigned int authsize = ctx->authsize;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002356 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002357
Yuan Kang643b39b2012-06-22 19:48:49 -05002358 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002359
Horia Geantabbf9c892013-11-28 15:11:16 +02002360 if (unlikely(req->dst != req->src)) {
2361 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2362 dst_nents = sg_count(req->dst,
2363 req->cryptlen +
2364 (encrypt ? authsize : (-authsize)),
2365 &dst_chained);
2366 } else {
2367 src_nents = sg_count(req->src,
2368 req->cryptlen +
2369 (encrypt ? authsize : 0),
2370 &src_chained);
2371 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002372
Yuan Kang643b39b2012-06-22 19:48:49 -05002373 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002374 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002375 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002376 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2377 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002378 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002379 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2380 DMA_TO_DEVICE, src_chained);
2381 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2382 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002383 }
2384
Yuan Kang1acebad2011-07-15 11:21:42 +08002385 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002386 if (dma_mapping_error(jrdev, iv_dma)) {
2387 dev_err(jrdev, "unable to map IV\n");
2388 return ERR_PTR(-ENOMEM);
2389 }
2390
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002391 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2392 OP_ALG_ALGSEL_AES) &&
2393 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2394 is_gcm = true;
2395
2396 /*
2397 * Check if data are contiguous.
2398 * GCM expected input sequence: IV, AAD, text
2399 * All other - expected input sequence: AAD, IV, text
2400 */
2401 if (is_gcm)
2402 all_contig = (!assoc_nents &&
2403 iv_dma + ivsize == sg_dma_address(req->assoc) &&
2404 !src_nents && sg_dma_address(req->assoc) +
2405 req->assoclen == sg_dma_address(req->src));
2406 else
2407 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2408 req->assoclen == iv_dma && !src_nents &&
2409 iv_dma + ivsize == sg_dma_address(req->src));
2410 if (!all_contig) {
Yuan Kang1acebad2011-07-15 11:21:42 +08002411 assoc_nents = assoc_nents ? : 1;
2412 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002413 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002414 }
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002415
Yuan Kanga299c832012-06-22 19:48:46 -05002416 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002417
Yuan Kanga299c832012-06-22 19:48:46 -05002418 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002419
2420 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08002421 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002422 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002423 if (!edesc) {
2424 dev_err(jrdev, "could not allocate extended descriptor\n");
2425 return ERR_PTR(-ENOMEM);
2426 }
2427
2428 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002429 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002430 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002431 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002432 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002433 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002434 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002435 edesc->sec4_sg_bytes = sec4_sg_bytes;
2436 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2437 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002438 *all_contig_ptr = all_contig;
2439
Yuan Kanga299c832012-06-22 19:48:46 -05002440 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002441 if (!all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002442 if (!is_gcm) {
2443 sg_to_sec4_sg(req->assoc,
2444 (assoc_nents ? : 1),
2445 edesc->sec4_sg +
2446 sec4_sg_index, 0);
2447 sec4_sg_index += assoc_nents ? : 1;
2448 }
2449
Yuan Kanga299c832012-06-22 19:48:46 -05002450 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002451 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002452 sec4_sg_index += 1;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002453
2454 if (is_gcm) {
2455 sg_to_sec4_sg(req->assoc,
2456 (assoc_nents ? : 1),
2457 edesc->sec4_sg +
2458 sec4_sg_index, 0);
2459 sec4_sg_index += assoc_nents ? : 1;
2460 }
2461
Yuan Kanga299c832012-06-22 19:48:46 -05002462 sg_to_sec4_sg_last(req->src,
2463 (src_nents ? : 1),
2464 edesc->sec4_sg +
2465 sec4_sg_index, 0);
2466 sec4_sg_index += src_nents ? : 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08002467 }
2468 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002469 sg_to_sec4_sg_last(req->dst, dst_nents,
2470 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002471 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302472 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2473 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002474 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2475 dev_err(jrdev, "unable to map S/G table\n");
2476 return ERR_PTR(-ENOMEM);
2477 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002478
2479 return edesc;
2480}
2481
Yuan Kang0e479302011-07-15 11:21:41 +08002482static int aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002483{
Yuan Kang0e479302011-07-15 11:21:41 +08002484 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002485 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002486 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2487 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002488 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002489 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002490 int ret = 0;
2491
Kim Phillips8e8ec592011-03-13 16:54:26 +08002492 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002493 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02002494 CAAM_CMD_SZ, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002495 if (IS_ERR(edesc))
2496 return PTR_ERR(edesc);
2497
Yuan Kang1acebad2011-07-15 11:21:42 +08002498 /* Create and submit job descriptor */
2499 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
2500 all_contig, true);
2501#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002502 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002503 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2504 desc_bytes(edesc->hw_desc), 1);
2505#endif
2506
Kim Phillips8e8ec592011-03-13 16:54:26 +08002507 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002508 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2509 if (!ret) {
2510 ret = -EINPROGRESS;
2511 } else {
2512 aead_unmap(jrdev, edesc, req);
2513 kfree(edesc);
2514 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002515
Yuan Kang1acebad2011-07-15 11:21:42 +08002516 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002517}
2518
Yuan Kang0e479302011-07-15 11:21:41 +08002519static int aead_decrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002520{
Yuan Kang1acebad2011-07-15 11:21:42 +08002521 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002522 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08002523 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2524 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002525 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08002526 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002527 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002528
2529 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002530 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02002531 CAAM_CMD_SZ, &all_contig, false);
Yuan Kang0e479302011-07-15 11:21:41 +08002532 if (IS_ERR(edesc))
2533 return PTR_ERR(edesc);
2534
Yuan Kang1acebad2011-07-15 11:21:42 +08002535#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002536 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002537 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2538 req->cryptlen, 1);
2539#endif
2540
2541 /* Create and submit job descriptor*/
2542 init_aead_job(ctx->sh_desc_dec,
2543 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
2544#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002545 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002546 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2547 desc_bytes(edesc->hw_desc), 1);
2548#endif
2549
Yuan Kang0e479302011-07-15 11:21:41 +08002550 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002551 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2552 if (!ret) {
2553 ret = -EINPROGRESS;
2554 } else {
2555 aead_unmap(jrdev, edesc, req);
2556 kfree(edesc);
2557 }
Yuan Kang0e479302011-07-15 11:21:41 +08002558
Yuan Kang1acebad2011-07-15 11:21:42 +08002559 return ret;
2560}
Yuan Kang0e479302011-07-15 11:21:41 +08002561
Yuan Kang1acebad2011-07-15 11:21:42 +08002562/*
2563 * allocate and map the aead extended descriptor for aead givencrypt
2564 */
2565static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2566 *greq, int desc_bytes,
2567 u32 *contig_ptr)
2568{
2569 struct aead_request *req = &greq->areq;
2570 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2571 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2572 struct device *jrdev = ctx->jrdev;
2573 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2574 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2575 int assoc_nents, src_nents, dst_nents = 0;
2576 struct aead_edesc *edesc;
2577 dma_addr_t iv_dma = 0;
2578 int sgc;
2579 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2580 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05002581 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002582 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002583 bool is_gcm = false;
Yuan Kang0e479302011-07-15 11:21:41 +08002584
Yuan Kang643b39b2012-06-22 19:48:49 -05002585 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2586 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08002587
Yuan Kang1acebad2011-07-15 11:21:42 +08002588 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02002589 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2590 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002591
Yuan Kang643b39b2012-06-22 19:48:49 -05002592 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002593 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002594 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002595 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2596 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002597 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002598 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2599 DMA_TO_DEVICE, src_chained);
2600 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2601 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002602 }
2603
Yuan Kang1acebad2011-07-15 11:21:42 +08002604 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002605 if (dma_mapping_error(jrdev, iv_dma)) {
2606 dev_err(jrdev, "unable to map IV\n");
2607 return ERR_PTR(-ENOMEM);
2608 }
2609
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002610 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2611 OP_ALG_ALGSEL_AES) &&
2612 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2613 is_gcm = true;
2614
2615 /*
2616 * Check if data are contiguous.
2617 * GCM expected input sequence: IV, AAD, text
2618 * All other - expected input sequence: AAD, IV, text
2619 */
2620
2621 if (is_gcm) {
2622 if (assoc_nents || iv_dma + ivsize !=
2623 sg_dma_address(req->assoc) || src_nents ||
2624 sg_dma_address(req->assoc) + req->assoclen !=
2625 sg_dma_address(req->src))
2626 contig &= ~GIV_SRC_CONTIG;
2627 } else {
2628 if (assoc_nents ||
2629 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2630 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2631 contig &= ~GIV_SRC_CONTIG;
2632 }
2633
Yuan Kang1acebad2011-07-15 11:21:42 +08002634 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2635 contig &= ~GIV_DST_CONTIG;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002636
Yuan Kang1acebad2011-07-15 11:21:42 +08002637 if (!(contig & GIV_SRC_CONTIG)) {
2638 assoc_nents = assoc_nents ? : 1;
2639 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002640 sec4_sg_len += assoc_nents + 1 + src_nents;
Tudor Ambarus19167bf2014-10-24 18:13:37 +03002641 if (req->src == req->dst &&
2642 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
Yuan Kang1acebad2011-07-15 11:21:42 +08002643 contig &= ~GIV_DST_CONTIG;
2644 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002645
2646 /*
2647 * Add new sg entries for GCM output sequence.
2648 * Expected output sequence: IV, encrypted text.
2649 */
2650 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2651 sec4_sg_len += 1 + src_nents;
2652
2653 if (unlikely(req->src != req->dst)) {
2654 dst_nents = dst_nents ? : 1;
2655 sec4_sg_len += 1 + dst_nents;
2656 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002657
Yuan Kanga299c832012-06-22 19:48:46 -05002658 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002659
2660 /* allocate space for base edesc and hw desc commands, link tables */
2661 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002662 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08002663 if (!edesc) {
2664 dev_err(jrdev, "could not allocate extended descriptor\n");
2665 return ERR_PTR(-ENOMEM);
2666 }
2667
2668 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002669 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002670 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002671 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002672 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002673 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002674 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002675 edesc->sec4_sg_bytes = sec4_sg_bytes;
2676 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2677 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002678 *contig_ptr = contig;
2679
Yuan Kanga299c832012-06-22 19:48:46 -05002680 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002681 if (!(contig & GIV_SRC_CONTIG)) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002682 if (!is_gcm) {
2683 sg_to_sec4_sg(req->assoc, assoc_nents,
2684 edesc->sec4_sg + sec4_sg_index, 0);
2685 sec4_sg_index += assoc_nents;
2686 }
2687
Yuan Kanga299c832012-06-22 19:48:46 -05002688 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002689 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002690 sec4_sg_index += 1;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002691
2692 if (is_gcm) {
2693 sg_to_sec4_sg(req->assoc, assoc_nents,
2694 edesc->sec4_sg + sec4_sg_index, 0);
2695 sec4_sg_index += assoc_nents;
2696 }
2697
Yuan Kanga299c832012-06-22 19:48:46 -05002698 sg_to_sec4_sg_last(req->src, src_nents,
2699 edesc->sec4_sg +
2700 sec4_sg_index, 0);
2701 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002702 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002703
2704 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2705 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2706 iv_dma, ivsize, 0);
2707 sec4_sg_index += 1;
2708 sg_to_sec4_sg_last(req->src, src_nents,
2709 edesc->sec4_sg + sec4_sg_index, 0);
2710 }
2711
Yuan Kang1acebad2011-07-15 11:21:42 +08002712 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05002713 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002714 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002715 sec4_sg_index += 1;
2716 sg_to_sec4_sg_last(req->dst, dst_nents,
2717 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002718 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302719 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2720 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002721 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2722 dev_err(jrdev, "unable to map S/G table\n");
2723 return ERR_PTR(-ENOMEM);
2724 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002725
2726 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002727}
2728
2729static int aead_givencrypt(struct aead_givcrypt_request *areq)
2730{
2731 struct aead_request *req = &areq->areq;
2732 struct aead_edesc *edesc;
2733 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002734 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2735 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002736 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002737 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002738 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002739
Kim Phillips8e8ec592011-03-13 16:54:26 +08002740 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002741 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
2742 CAAM_CMD_SZ, &contig);
2743
Kim Phillips8e8ec592011-03-13 16:54:26 +08002744 if (IS_ERR(edesc))
2745 return PTR_ERR(edesc);
2746
Yuan Kang1acebad2011-07-15 11:21:42 +08002747#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002748 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002749 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2750 req->cryptlen, 1);
2751#endif
2752
2753 /* Create and submit job descriptor*/
2754 init_aead_giv_job(ctx->sh_desc_givenc,
2755 ctx->sh_desc_givenc_dma, edesc, req, contig);
2756#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002757 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002758 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2759 desc_bytes(edesc->hw_desc), 1);
2760#endif
2761
Kim Phillips8e8ec592011-03-13 16:54:26 +08002762 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002763 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2764 if (!ret) {
2765 ret = -EINPROGRESS;
2766 } else {
2767 aead_unmap(jrdev, edesc, req);
2768 kfree(edesc);
2769 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002770
Yuan Kang1acebad2011-07-15 11:21:42 +08002771 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002772}
2773
Horia Geantaae4a8252014-03-14 17:46:52 +02002774static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
2775{
2776 return aead_encrypt(&areq->areq);
2777}
2778
Yuan Kangacdca312011-07-15 11:21:42 +08002779/*
2780 * allocate and map the ablkcipher extended descriptor for ablkcipher
2781 */
2782static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2783 *req, int desc_bytes,
2784 bool *iv_contig_out)
2785{
2786 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2787 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2788 struct device *jrdev = ctx->jrdev;
2789 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2790 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2791 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05002792 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002793 struct ablkcipher_edesc *edesc;
2794 dma_addr_t iv_dma = 0;
2795 bool iv_contig = false;
2796 int sgc;
2797 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05002798 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002799 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08002800
Yuan Kang643b39b2012-06-22 19:48:49 -05002801 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002802
Yuan Kang643b39b2012-06-22 19:48:49 -05002803 if (req->dst != req->src)
2804 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002805
2806 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002807 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2808 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002809 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002810 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2811 DMA_TO_DEVICE, src_chained);
2812 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2813 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002814 }
2815
Horia Geantace572082014-07-11 15:34:49 +03002816 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2817 if (dma_mapping_error(jrdev, iv_dma)) {
2818 dev_err(jrdev, "unable to map IV\n");
2819 return ERR_PTR(-ENOMEM);
2820 }
2821
Yuan Kangacdca312011-07-15 11:21:42 +08002822 /*
2823 * Check if iv can be contiguous with source and destination.
2824 * If so, include it. If not, create scatterlist.
2825 */
Yuan Kangacdca312011-07-15 11:21:42 +08002826 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2827 iv_contig = true;
2828 else
2829 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002830 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2831 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002832
2833 /* allocate space for base edesc and hw desc commands, link tables */
2834 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002835 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002836 if (!edesc) {
2837 dev_err(jrdev, "could not allocate extended descriptor\n");
2838 return ERR_PTR(-ENOMEM);
2839 }
2840
2841 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002842 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08002843 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002844 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05002845 edesc->sec4_sg_bytes = sec4_sg_bytes;
2846 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2847 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002848
Yuan Kanga299c832012-06-22 19:48:46 -05002849 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002850 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002851 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2852 sg_to_sec4_sg_last(req->src, src_nents,
2853 edesc->sec4_sg + 1, 0);
2854 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002855 }
2856
Yuan Kang643b39b2012-06-22 19:48:49 -05002857 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002858 sg_to_sec4_sg_last(req->dst, dst_nents,
2859 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08002860 }
2861
Yuan Kanga299c832012-06-22 19:48:46 -05002862 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2863 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002864 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2865 dev_err(jrdev, "unable to map S/G table\n");
2866 return ERR_PTR(-ENOMEM);
2867 }
2868
Yuan Kangacdca312011-07-15 11:21:42 +08002869 edesc->iv_dma = iv_dma;
2870
2871#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002872 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05002873 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2874 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002875#endif
2876
2877 *iv_contig_out = iv_contig;
2878 return edesc;
2879}
2880
2881static int ablkcipher_encrypt(struct ablkcipher_request *req)
2882{
2883 struct ablkcipher_edesc *edesc;
2884 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2885 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2886 struct device *jrdev = ctx->jrdev;
2887 bool iv_contig;
2888 u32 *desc;
2889 int ret = 0;
2890
2891 /* allocate extended descriptor */
2892 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2893 CAAM_CMD_SZ, &iv_contig);
2894 if (IS_ERR(edesc))
2895 return PTR_ERR(edesc);
2896
2897 /* Create and submit job descriptor*/
2898 init_ablkcipher_job(ctx->sh_desc_enc,
2899 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2900#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002901 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002902 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2903 desc_bytes(edesc->hw_desc), 1);
2904#endif
2905 desc = edesc->hw_desc;
2906 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2907
2908 if (!ret) {
2909 ret = -EINPROGRESS;
2910 } else {
2911 ablkcipher_unmap(jrdev, edesc, req);
2912 kfree(edesc);
2913 }
2914
2915 return ret;
2916}
2917
2918static int ablkcipher_decrypt(struct ablkcipher_request *req)
2919{
2920 struct ablkcipher_edesc *edesc;
2921 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2922 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2923 struct device *jrdev = ctx->jrdev;
2924 bool iv_contig;
2925 u32 *desc;
2926 int ret = 0;
2927
2928 /* allocate extended descriptor */
2929 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2930 CAAM_CMD_SZ, &iv_contig);
2931 if (IS_ERR(edesc))
2932 return PTR_ERR(edesc);
2933
2934 /* Create and submit job descriptor*/
2935 init_ablkcipher_job(ctx->sh_desc_dec,
2936 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2937 desc = edesc->hw_desc;
2938#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002939 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002940 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2941 desc_bytes(edesc->hw_desc), 1);
2942#endif
2943
2944 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2945 if (!ret) {
2946 ret = -EINPROGRESS;
2947 } else {
2948 ablkcipher_unmap(jrdev, edesc, req);
2949 kfree(edesc);
2950 }
2951
2952 return ret;
2953}
2954
Yuan Kang885e9e22011-07-15 11:21:41 +08002955#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002956#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08002957struct caam_alg_template {
2958 char name[CRYPTO_MAX_ALG_NAME];
2959 char driver_name[CRYPTO_MAX_ALG_NAME];
2960 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002961 u32 type;
2962 union {
2963 struct ablkcipher_alg ablkcipher;
2964 struct aead_alg aead;
2965 struct blkcipher_alg blkcipher;
2966 struct cipher_alg cipher;
2967 struct compress_alg compress;
2968 struct rng_alg rng;
2969 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002970 u32 class1_alg_type;
2971 u32 class2_alg_type;
2972 u32 alg_op;
2973};
2974
2975static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02002976 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08002977 {
Horia Geantaae4a8252014-03-14 17:46:52 +02002978 .name = "authenc(hmac(md5),ecb(cipher_null))",
2979 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
2980 .blocksize = NULL_BLOCK_SIZE,
2981 .type = CRYPTO_ALG_TYPE_AEAD,
2982 .template_aead = {
2983 .setkey = aead_setkey,
2984 .setauthsize = aead_setauthsize,
2985 .encrypt = aead_encrypt,
2986 .decrypt = aead_decrypt,
2987 .givencrypt = aead_null_givencrypt,
2988 .geniv = "<built-in>",
2989 .ivsize = NULL_IV_SIZE,
2990 .maxauthsize = MD5_DIGEST_SIZE,
2991 },
2992 .class1_alg_type = 0,
2993 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2994 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2995 },
2996 {
2997 .name = "authenc(hmac(sha1),ecb(cipher_null))",
2998 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
2999 .blocksize = NULL_BLOCK_SIZE,
3000 .type = CRYPTO_ALG_TYPE_AEAD,
3001 .template_aead = {
3002 .setkey = aead_setkey,
3003 .setauthsize = aead_setauthsize,
3004 .encrypt = aead_encrypt,
3005 .decrypt = aead_decrypt,
3006 .givencrypt = aead_null_givencrypt,
3007 .geniv = "<built-in>",
3008 .ivsize = NULL_IV_SIZE,
3009 .maxauthsize = SHA1_DIGEST_SIZE,
3010 },
3011 .class1_alg_type = 0,
3012 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3013 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3014 },
3015 {
3016 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3017 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3018 .blocksize = NULL_BLOCK_SIZE,
3019 .type = CRYPTO_ALG_TYPE_AEAD,
3020 .template_aead = {
3021 .setkey = aead_setkey,
3022 .setauthsize = aead_setauthsize,
3023 .encrypt = aead_encrypt,
3024 .decrypt = aead_decrypt,
3025 .givencrypt = aead_null_givencrypt,
3026 .geniv = "<built-in>",
3027 .ivsize = NULL_IV_SIZE,
3028 .maxauthsize = SHA224_DIGEST_SIZE,
3029 },
3030 .class1_alg_type = 0,
3031 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3032 OP_ALG_AAI_HMAC_PRECOMP,
3033 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3034 },
3035 {
3036 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3037 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3038 .blocksize = NULL_BLOCK_SIZE,
3039 .type = CRYPTO_ALG_TYPE_AEAD,
3040 .template_aead = {
3041 .setkey = aead_setkey,
3042 .setauthsize = aead_setauthsize,
3043 .encrypt = aead_encrypt,
3044 .decrypt = aead_decrypt,
3045 .givencrypt = aead_null_givencrypt,
3046 .geniv = "<built-in>",
3047 .ivsize = NULL_IV_SIZE,
3048 .maxauthsize = SHA256_DIGEST_SIZE,
3049 },
3050 .class1_alg_type = 0,
3051 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3052 OP_ALG_AAI_HMAC_PRECOMP,
3053 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3054 },
3055 {
3056 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3057 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3058 .blocksize = NULL_BLOCK_SIZE,
3059 .type = CRYPTO_ALG_TYPE_AEAD,
3060 .template_aead = {
3061 .setkey = aead_setkey,
3062 .setauthsize = aead_setauthsize,
3063 .encrypt = aead_encrypt,
3064 .decrypt = aead_decrypt,
3065 .givencrypt = aead_null_givencrypt,
3066 .geniv = "<built-in>",
3067 .ivsize = NULL_IV_SIZE,
3068 .maxauthsize = SHA384_DIGEST_SIZE,
3069 },
3070 .class1_alg_type = 0,
3071 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3072 OP_ALG_AAI_HMAC_PRECOMP,
3073 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3074 },
3075 {
3076 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3077 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3078 .blocksize = NULL_BLOCK_SIZE,
3079 .type = CRYPTO_ALG_TYPE_AEAD,
3080 .template_aead = {
3081 .setkey = aead_setkey,
3082 .setauthsize = aead_setauthsize,
3083 .encrypt = aead_encrypt,
3084 .decrypt = aead_decrypt,
3085 .givencrypt = aead_null_givencrypt,
3086 .geniv = "<built-in>",
3087 .ivsize = NULL_IV_SIZE,
3088 .maxauthsize = SHA512_DIGEST_SIZE,
3089 },
3090 .class1_alg_type = 0,
3091 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3092 OP_ALG_AAI_HMAC_PRECOMP,
3093 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3094 },
3095 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003096 .name = "authenc(hmac(md5),cbc(aes))",
3097 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3098 .blocksize = AES_BLOCK_SIZE,
3099 .type = CRYPTO_ALG_TYPE_AEAD,
3100 .template_aead = {
3101 .setkey = aead_setkey,
3102 .setauthsize = aead_setauthsize,
3103 .encrypt = aead_encrypt,
3104 .decrypt = aead_decrypt,
3105 .givencrypt = aead_givencrypt,
3106 .geniv = "<built-in>",
3107 .ivsize = AES_BLOCK_SIZE,
3108 .maxauthsize = MD5_DIGEST_SIZE,
3109 },
3110 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3111 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3112 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3113 },
3114 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003115 .name = "authenc(hmac(sha1),cbc(aes))",
3116 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3117 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003118 .type = CRYPTO_ALG_TYPE_AEAD,
3119 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003120 .setkey = aead_setkey,
3121 .setauthsize = aead_setauthsize,
3122 .encrypt = aead_encrypt,
3123 .decrypt = aead_decrypt,
3124 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003125 .geniv = "<built-in>",
3126 .ivsize = AES_BLOCK_SIZE,
3127 .maxauthsize = SHA1_DIGEST_SIZE,
3128 },
3129 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3130 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3131 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3132 },
3133 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003134 .name = "authenc(hmac(sha224),cbc(aes))",
3135 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3136 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303137 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003138 .template_aead = {
3139 .setkey = aead_setkey,
3140 .setauthsize = aead_setauthsize,
3141 .encrypt = aead_encrypt,
3142 .decrypt = aead_decrypt,
3143 .givencrypt = aead_givencrypt,
3144 .geniv = "<built-in>",
3145 .ivsize = AES_BLOCK_SIZE,
3146 .maxauthsize = SHA224_DIGEST_SIZE,
3147 },
3148 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3149 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3150 OP_ALG_AAI_HMAC_PRECOMP,
3151 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3152 },
3153 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003154 .name = "authenc(hmac(sha256),cbc(aes))",
3155 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3156 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003157 .type = CRYPTO_ALG_TYPE_AEAD,
3158 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003159 .setkey = aead_setkey,
3160 .setauthsize = aead_setauthsize,
3161 .encrypt = aead_encrypt,
3162 .decrypt = aead_decrypt,
3163 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003164 .geniv = "<built-in>",
3165 .ivsize = AES_BLOCK_SIZE,
3166 .maxauthsize = SHA256_DIGEST_SIZE,
3167 },
3168 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3169 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3170 OP_ALG_AAI_HMAC_PRECOMP,
3171 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3172 },
3173 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003174 .name = "authenc(hmac(sha384),cbc(aes))",
3175 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3176 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303177 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003178 .template_aead = {
3179 .setkey = aead_setkey,
3180 .setauthsize = aead_setauthsize,
3181 .encrypt = aead_encrypt,
3182 .decrypt = aead_decrypt,
3183 .givencrypt = aead_givencrypt,
3184 .geniv = "<built-in>",
3185 .ivsize = AES_BLOCK_SIZE,
3186 .maxauthsize = SHA384_DIGEST_SIZE,
3187 },
3188 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3189 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3190 OP_ALG_AAI_HMAC_PRECOMP,
3191 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3192 },
3193
3194 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003195 .name = "authenc(hmac(sha512),cbc(aes))",
3196 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3197 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003198 .type = CRYPTO_ALG_TYPE_AEAD,
3199 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003200 .setkey = aead_setkey,
3201 .setauthsize = aead_setauthsize,
3202 .encrypt = aead_encrypt,
3203 .decrypt = aead_decrypt,
3204 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003205 .geniv = "<built-in>",
3206 .ivsize = AES_BLOCK_SIZE,
3207 .maxauthsize = SHA512_DIGEST_SIZE,
3208 },
3209 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3210 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3211 OP_ALG_AAI_HMAC_PRECOMP,
3212 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3213 },
3214 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003215 .name = "authenc(hmac(md5),cbc(des3_ede))",
3216 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3217 .blocksize = DES3_EDE_BLOCK_SIZE,
3218 .type = CRYPTO_ALG_TYPE_AEAD,
3219 .template_aead = {
3220 .setkey = aead_setkey,
3221 .setauthsize = aead_setauthsize,
3222 .encrypt = aead_encrypt,
3223 .decrypt = aead_decrypt,
3224 .givencrypt = aead_givencrypt,
3225 .geniv = "<built-in>",
3226 .ivsize = DES3_EDE_BLOCK_SIZE,
3227 .maxauthsize = MD5_DIGEST_SIZE,
3228 },
3229 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3230 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3231 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3232 },
3233 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003234 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3235 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3236 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003237 .type = CRYPTO_ALG_TYPE_AEAD,
3238 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003239 .setkey = aead_setkey,
3240 .setauthsize = aead_setauthsize,
3241 .encrypt = aead_encrypt,
3242 .decrypt = aead_decrypt,
3243 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003244 .geniv = "<built-in>",
3245 .ivsize = DES3_EDE_BLOCK_SIZE,
3246 .maxauthsize = SHA1_DIGEST_SIZE,
3247 },
3248 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3249 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3250 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3251 },
3252 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003253 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3254 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3255 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303256 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003257 .template_aead = {
3258 .setkey = aead_setkey,
3259 .setauthsize = aead_setauthsize,
3260 .encrypt = aead_encrypt,
3261 .decrypt = aead_decrypt,
3262 .givencrypt = aead_givencrypt,
3263 .geniv = "<built-in>",
3264 .ivsize = DES3_EDE_BLOCK_SIZE,
3265 .maxauthsize = SHA224_DIGEST_SIZE,
3266 },
3267 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3268 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3269 OP_ALG_AAI_HMAC_PRECOMP,
3270 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3271 },
3272 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003273 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3274 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3275 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003276 .type = CRYPTO_ALG_TYPE_AEAD,
3277 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003278 .setkey = aead_setkey,
3279 .setauthsize = aead_setauthsize,
3280 .encrypt = aead_encrypt,
3281 .decrypt = aead_decrypt,
3282 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003283 .geniv = "<built-in>",
3284 .ivsize = DES3_EDE_BLOCK_SIZE,
3285 .maxauthsize = SHA256_DIGEST_SIZE,
3286 },
3287 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3288 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3289 OP_ALG_AAI_HMAC_PRECOMP,
3290 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3291 },
3292 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003293 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3294 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3295 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303296 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003297 .template_aead = {
3298 .setkey = aead_setkey,
3299 .setauthsize = aead_setauthsize,
3300 .encrypt = aead_encrypt,
3301 .decrypt = aead_decrypt,
3302 .givencrypt = aead_givencrypt,
3303 .geniv = "<built-in>",
3304 .ivsize = DES3_EDE_BLOCK_SIZE,
3305 .maxauthsize = SHA384_DIGEST_SIZE,
3306 },
3307 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3308 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3309 OP_ALG_AAI_HMAC_PRECOMP,
3310 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3311 },
3312 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003313 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3314 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3315 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003316 .type = CRYPTO_ALG_TYPE_AEAD,
3317 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003318 .setkey = aead_setkey,
3319 .setauthsize = aead_setauthsize,
3320 .encrypt = aead_encrypt,
3321 .decrypt = aead_decrypt,
3322 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003323 .geniv = "<built-in>",
3324 .ivsize = DES3_EDE_BLOCK_SIZE,
3325 .maxauthsize = SHA512_DIGEST_SIZE,
3326 },
3327 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3328 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3329 OP_ALG_AAI_HMAC_PRECOMP,
3330 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3331 },
3332 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003333 .name = "authenc(hmac(md5),cbc(des))",
3334 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3335 .blocksize = DES_BLOCK_SIZE,
3336 .type = CRYPTO_ALG_TYPE_AEAD,
3337 .template_aead = {
3338 .setkey = aead_setkey,
3339 .setauthsize = aead_setauthsize,
3340 .encrypt = aead_encrypt,
3341 .decrypt = aead_decrypt,
3342 .givencrypt = aead_givencrypt,
3343 .geniv = "<built-in>",
3344 .ivsize = DES_BLOCK_SIZE,
3345 .maxauthsize = MD5_DIGEST_SIZE,
3346 },
3347 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3348 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3349 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3350 },
3351 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003352 .name = "authenc(hmac(sha1),cbc(des))",
3353 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3354 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003355 .type = CRYPTO_ALG_TYPE_AEAD,
3356 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003357 .setkey = aead_setkey,
3358 .setauthsize = aead_setauthsize,
3359 .encrypt = aead_encrypt,
3360 .decrypt = aead_decrypt,
3361 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003362 .geniv = "<built-in>",
3363 .ivsize = DES_BLOCK_SIZE,
3364 .maxauthsize = SHA1_DIGEST_SIZE,
3365 },
3366 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3367 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3368 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3369 },
3370 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003371 .name = "authenc(hmac(sha224),cbc(des))",
3372 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3373 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303374 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003375 .template_aead = {
3376 .setkey = aead_setkey,
3377 .setauthsize = aead_setauthsize,
3378 .encrypt = aead_encrypt,
3379 .decrypt = aead_decrypt,
3380 .givencrypt = aead_givencrypt,
3381 .geniv = "<built-in>",
3382 .ivsize = DES_BLOCK_SIZE,
3383 .maxauthsize = SHA224_DIGEST_SIZE,
3384 },
3385 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3386 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3387 OP_ALG_AAI_HMAC_PRECOMP,
3388 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3389 },
3390 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003391 .name = "authenc(hmac(sha256),cbc(des))",
3392 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3393 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003394 .type = CRYPTO_ALG_TYPE_AEAD,
3395 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003396 .setkey = aead_setkey,
3397 .setauthsize = aead_setauthsize,
3398 .encrypt = aead_encrypt,
3399 .decrypt = aead_decrypt,
3400 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003401 .geniv = "<built-in>",
3402 .ivsize = DES_BLOCK_SIZE,
3403 .maxauthsize = SHA256_DIGEST_SIZE,
3404 },
3405 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3406 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3407 OP_ALG_AAI_HMAC_PRECOMP,
3408 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3409 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003410 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003411 .name = "authenc(hmac(sha384),cbc(des))",
3412 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3413 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303414 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003415 .template_aead = {
3416 .setkey = aead_setkey,
3417 .setauthsize = aead_setauthsize,
3418 .encrypt = aead_encrypt,
3419 .decrypt = aead_decrypt,
3420 .givencrypt = aead_givencrypt,
3421 .geniv = "<built-in>",
3422 .ivsize = DES_BLOCK_SIZE,
3423 .maxauthsize = SHA384_DIGEST_SIZE,
3424 },
3425 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3426 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3427 OP_ALG_AAI_HMAC_PRECOMP,
3428 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3429 },
3430 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003431 .name = "authenc(hmac(sha512),cbc(des))",
3432 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3433 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003434 .type = CRYPTO_ALG_TYPE_AEAD,
3435 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003436 .setkey = aead_setkey,
3437 .setauthsize = aead_setauthsize,
3438 .encrypt = aead_encrypt,
3439 .decrypt = aead_decrypt,
3440 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003441 .geniv = "<built-in>",
3442 .ivsize = DES_BLOCK_SIZE,
3443 .maxauthsize = SHA512_DIGEST_SIZE,
3444 },
3445 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3446 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3447 OP_ALG_AAI_HMAC_PRECOMP,
3448 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3449 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003450 {
3451 .name = "rfc4106(gcm(aes))",
3452 .driver_name = "rfc4106-gcm-aes-caam",
3453 .blocksize = 1,
3454 .type = CRYPTO_ALG_TYPE_AEAD,
3455 .template_aead = {
3456 .setkey = rfc4106_setkey,
3457 .setauthsize = rfc4106_setauthsize,
3458 .encrypt = aead_encrypt,
3459 .decrypt = aead_decrypt,
3460 .givencrypt = aead_givencrypt,
3461 .geniv = "<built-in>",
3462 .ivsize = 8,
3463 .maxauthsize = AES_BLOCK_SIZE,
3464 },
3465 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3466 },
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02003467 {
3468 .name = "rfc4543(gcm(aes))",
3469 .driver_name = "rfc4543-gcm-aes-caam",
3470 .blocksize = 1,
3471 .type = CRYPTO_ALG_TYPE_AEAD,
3472 .template_aead = {
3473 .setkey = rfc4543_setkey,
3474 .setauthsize = rfc4543_setauthsize,
3475 .encrypt = aead_encrypt,
3476 .decrypt = aead_decrypt,
3477 .givencrypt = aead_givencrypt,
3478 .geniv = "<built-in>",
3479 .ivsize = 8,
3480 .maxauthsize = AES_BLOCK_SIZE,
3481 },
3482 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3483 },
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03003484 /* Galois Counter Mode */
3485 {
3486 .name = "gcm(aes)",
3487 .driver_name = "gcm-aes-caam",
3488 .blocksize = 1,
3489 .type = CRYPTO_ALG_TYPE_AEAD,
3490 .template_aead = {
3491 .setkey = gcm_setkey,
3492 .setauthsize = gcm_setauthsize,
3493 .encrypt = aead_encrypt,
3494 .decrypt = aead_decrypt,
3495 .givencrypt = NULL,
3496 .geniv = "<built-in>",
3497 .ivsize = 12,
3498 .maxauthsize = AES_BLOCK_SIZE,
3499 },
3500 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3501 },
Yuan Kangacdca312011-07-15 11:21:42 +08003502 /* ablkcipher descriptor */
3503 {
3504 .name = "cbc(aes)",
3505 .driver_name = "cbc-aes-caam",
3506 .blocksize = AES_BLOCK_SIZE,
3507 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3508 .template_ablkcipher = {
3509 .setkey = ablkcipher_setkey,
3510 .encrypt = ablkcipher_encrypt,
3511 .decrypt = ablkcipher_decrypt,
3512 .geniv = "eseqiv",
3513 .min_keysize = AES_MIN_KEY_SIZE,
3514 .max_keysize = AES_MAX_KEY_SIZE,
3515 .ivsize = AES_BLOCK_SIZE,
3516 },
3517 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3518 },
3519 {
3520 .name = "cbc(des3_ede)",
3521 .driver_name = "cbc-3des-caam",
3522 .blocksize = DES3_EDE_BLOCK_SIZE,
3523 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3524 .template_ablkcipher = {
3525 .setkey = ablkcipher_setkey,
3526 .encrypt = ablkcipher_encrypt,
3527 .decrypt = ablkcipher_decrypt,
3528 .geniv = "eseqiv",
3529 .min_keysize = DES3_EDE_KEY_SIZE,
3530 .max_keysize = DES3_EDE_KEY_SIZE,
3531 .ivsize = DES3_EDE_BLOCK_SIZE,
3532 },
3533 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3534 },
3535 {
3536 .name = "cbc(des)",
3537 .driver_name = "cbc-des-caam",
3538 .blocksize = DES_BLOCK_SIZE,
3539 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3540 .template_ablkcipher = {
3541 .setkey = ablkcipher_setkey,
3542 .encrypt = ablkcipher_encrypt,
3543 .decrypt = ablkcipher_decrypt,
3544 .geniv = "eseqiv",
3545 .min_keysize = DES_KEY_SIZE,
3546 .max_keysize = DES_KEY_SIZE,
3547 .ivsize = DES_BLOCK_SIZE,
3548 },
3549 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02003550 },
3551 {
3552 .name = "ctr(aes)",
3553 .driver_name = "ctr-aes-caam",
3554 .blocksize = 1,
3555 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3556 .template_ablkcipher = {
3557 .setkey = ablkcipher_setkey,
3558 .encrypt = ablkcipher_encrypt,
3559 .decrypt = ablkcipher_decrypt,
3560 .geniv = "chainiv",
3561 .min_keysize = AES_MIN_KEY_SIZE,
3562 .max_keysize = AES_MAX_KEY_SIZE,
3563 .ivsize = AES_BLOCK_SIZE,
3564 },
3565 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Yuan Kangacdca312011-07-15 11:21:42 +08003566 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003567};
3568
3569struct caam_crypto_alg {
3570 struct list_head entry;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003571 int class1_alg_type;
3572 int class2_alg_type;
3573 int alg_op;
3574 struct crypto_alg crypto_alg;
3575};
3576
3577static int caam_cra_init(struct crypto_tfm *tfm)
3578{
3579 struct crypto_alg *alg = tfm->__crt_alg;
3580 struct caam_crypto_alg *caam_alg =
3581 container_of(alg, struct caam_crypto_alg, crypto_alg);
3582 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003583
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303584 ctx->jrdev = caam_jr_alloc();
3585 if (IS_ERR(ctx->jrdev)) {
3586 pr_err("Job Ring Device allocation for transform failed\n");
3587 return PTR_ERR(ctx->jrdev);
3588 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003589
3590 /* copy descriptor header template value */
3591 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
3592 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
3593 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
3594
3595 return 0;
3596}
3597
3598static void caam_cra_exit(struct crypto_tfm *tfm)
3599{
3600 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3601
Yuan Kang1acebad2011-07-15 11:21:42 +08003602 if (ctx->sh_desc_enc_dma &&
3603 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
3604 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
3605 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
3606 if (ctx->sh_desc_dec_dma &&
3607 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
3608 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
3609 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
3610 if (ctx->sh_desc_givenc_dma &&
3611 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
3612 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
3613 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05003614 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02003615 if (ctx->key_dma &&
3616 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
3617 dma_unmap_single(ctx->jrdev, ctx->key_dma,
3618 ctx->enckeylen + ctx->split_key_pad_len,
3619 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303620
3621 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003622}
3623
3624static void __exit caam_algapi_exit(void)
3625{
3626
Kim Phillips8e8ec592011-03-13 16:54:26 +08003627 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003628
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303629 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08003630 return;
3631
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303632 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003633 crypto_unregister_alg(&t_alg->crypto_alg);
3634 list_del(&t_alg->entry);
3635 kfree(t_alg);
3636 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003637}
3638
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303639static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08003640 *template)
3641{
3642 struct caam_crypto_alg *t_alg;
3643 struct crypto_alg *alg;
3644
3645 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
3646 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303647 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003648 return ERR_PTR(-ENOMEM);
3649 }
3650
3651 alg = &t_alg->crypto_alg;
3652
3653 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3654 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3655 template->driver_name);
3656 alg->cra_module = THIS_MODULE;
3657 alg->cra_init = caam_cra_init;
3658 alg->cra_exit = caam_cra_exit;
3659 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003660 alg->cra_blocksize = template->blocksize;
3661 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003662 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003663 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3664 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08003665 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08003666 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3667 alg->cra_type = &crypto_ablkcipher_type;
3668 alg->cra_ablkcipher = template->template_ablkcipher;
3669 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08003670 case CRYPTO_ALG_TYPE_AEAD:
3671 alg->cra_type = &crypto_aead_type;
3672 alg->cra_aead = template->template_aead;
3673 break;
3674 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003675
3676 t_alg->class1_alg_type = template->class1_alg_type;
3677 t_alg->class2_alg_type = template->class2_alg_type;
3678 t_alg->alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003679
3680 return t_alg;
3681}
3682
3683static int __init caam_algapi_init(void)
3684{
Ruchika Gupta35af6402014-07-07 10:42:12 +05303685 struct device_node *dev_node;
3686 struct platform_device *pdev;
3687 struct device *ctrldev;
3688 void *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003689 int i = 0, err = 0;
3690
Ruchika Gupta35af6402014-07-07 10:42:12 +05303691 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3692 if (!dev_node) {
3693 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3694 if (!dev_node)
3695 return -ENODEV;
3696 }
3697
3698 pdev = of_find_device_by_node(dev_node);
3699 if (!pdev) {
3700 of_node_put(dev_node);
3701 return -ENODEV;
3702 }
3703
3704 ctrldev = &pdev->dev;
3705 priv = dev_get_drvdata(ctrldev);
3706 of_node_put(dev_node);
3707
3708 /*
3709 * If priv is NULL, it's probably because the caam driver wasn't
3710 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3711 */
3712 if (!priv)
3713 return -ENODEV;
3714
3715
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303716 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003717
3718 /* register crypto algorithms the device supports */
3719 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3720 /* TODO: check if h/w supports alg */
3721 struct caam_crypto_alg *t_alg;
3722
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303723 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003724 if (IS_ERR(t_alg)) {
3725 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303726 pr_warn("%s alg allocation failed\n",
3727 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003728 continue;
3729 }
3730
3731 err = crypto_register_alg(&t_alg->crypto_alg);
3732 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303733 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08003734 t_alg->crypto_alg.cra_driver_name);
3735 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02003736 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303737 list_add_tail(&t_alg->entry, &alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003738 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303739 if (!list_empty(&alg_list))
3740 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003741
3742 return err;
3743}
3744
3745module_init(caam_algapi_init);
3746module_exit(caam_algapi_exit);
3747
3748MODULE_LICENSE("GPL");
3749MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3750MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");