/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
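/*
 * Editorial illustration (not part of the original driver): using the
 * desc_constr.h helpers, a job descriptor with the layout sketched above
 * would be assembled roughly as below.  The function name and parameters are
 * assumptions chosen for the example, not the driver's actual job-ring
 * submission path.
 */
#if 0
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_desc_len, dma_addr_t out_dma,
				   u32 out_len, dma_addr_t in_dma, u32 in_len)
{
	/* Header plus pointer to the shared descriptor */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, out_dma, out_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, in_dma, in_len, 0);
}
#endif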

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 12 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
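/*
 * Editorial note (not in the original source): the *_LEN values above are
 * worst-case shared-descriptor sizes, used by the set_sh_desc routines below
 * to decide whether key material can be inlined.  As an assumed example with
 * CAAM_CMD_SZ == 4 and CAAM_DESC_BYTES_MAX == 256, DESC_AEAD_ENC_LEN works
 * out to (4 + 15) * 4 = 76 bytes, so keys are inlined only if
 * DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + split_key_pad_len + enckeylen still
 * fits in the 256-byte (64-word) descriptor buffer.
 */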

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
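/*
 * Editorial note (assumption, not from the original source): the jump pair
 * above selects between two OPERATION commands at run time.  When the shared
 * descriptor is entered with the SHRD condition set (its key was left loaded
 * by a previous job), execution lands on the OP_ALG_AAI_DK ("decrypt key")
 * variant; otherwise the plain OP_ALG_DECRYPT operation runs and the DK
 * variant is jumped over.
 */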

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
			LDST_SRCDST_BYTE_CONTEXT |
			(ivoffset << LDST_OFFSET_SHIFT));
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
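/*
 * Editorial illustration (not in the original source): for the authenc
 * algorithms the per-session key buffer is packed as
 *
 *   ctx->key:  [ MDHA split key, padded to split_key_pad_len ]
 *              [ encryption key, enckeylen bytes             ]
 *              [ CTR_RFC3686_NONCE_SIZE nonce (rfc3686 only) ]
 *
 * which is why append_key_aead() below addresses the class 1 key at
 * ctx->key + ctx->split_key_pad_len and the rfc3686 nonce right after the
 * encryption key.
 */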

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 * | enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
259
Horia Geantaae4a8252014-03-14 17:46:52 +0200260static int aead_null_set_sh_desc(struct crypto_aead *aead)
261{
Herbert Xuadd86d52015-05-11 17:47:50 +0800262 unsigned int ivsize = crypto_aead_ivsize(aead);
Horia Geantaae4a8252014-03-14 17:46:52 +0200263 struct caam_ctx *ctx = crypto_aead_ctx(aead);
264 struct device *jrdev = ctx->jrdev;
265 bool keys_fit_inline = false;
266 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
267 u32 *desc;
268
269 /*
270 * Job Descriptor and Shared Descriptors
271 * must all fit into the 64-word Descriptor h/w Buffer
272 */
273 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
274 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
275 keys_fit_inline = true;
276
Herbert Xuf2147b82015-06-16 13:54:23 +0800277 /* old_aead_encrypt shared descriptor */
Horia Geantaae4a8252014-03-14 17:46:52 +0200278 desc = ctx->sh_desc_enc;
279
280 init_sh_desc(desc, HDR_SHARE_SERIAL);
281
282 /* Skip if already shared */
283 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
284 JUMP_COND_SHRD);
285 if (keys_fit_inline)
286 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
287 ctx->split_key_len, CLASS_2 |
288 KEY_DEST_MDHA_SPLIT | KEY_ENC);
289 else
290 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
291 KEY_DEST_MDHA_SPLIT | KEY_ENC);
292 set_jump_tgt_here(desc, key_jump_cmd);
293
294 /* cryptlen = seqoutlen - authsize */
295 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
296
297 /*
298 * NULL encryption; IV is zero
299 * assoclen = (assoclen + cryptlen) - cryptlen
300 */
301 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
302
303 /* read assoc before reading payload */
304 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
305 KEY_VLF);
306
307 /* Prepare to read and write cryptlen bytes */
308 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
309 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
310
311 /*
312 * MOVE_LEN opcode is not available in all SEC HW revisions,
313 * thus need to do some magic, i.e. self-patch the descriptor
314 * buffer.
315 */
316 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
317 MOVE_DEST_MATH3 |
318 (0x6 << MOVE_LEN_SHIFT));
319 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
320 MOVE_DEST_DESCBUF |
321 MOVE_WAITCOMP |
322 (0x8 << MOVE_LEN_SHIFT));
323
324 /* Class 2 operation */
325 append_operation(desc, ctx->class2_alg_type |
326 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
327
328 /* Read and write cryptlen bytes */
329 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
330
331 set_move_tgt_here(desc, read_move_cmd);
332 set_move_tgt_here(desc, write_move_cmd);
333 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
334 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
335 MOVE_AUX_LS);
336
337 /* Write ICV */
338 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
339 LDST_SRCDST_BYTE_CONTEXT);
340
341 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
342 desc_bytes(desc),
343 DMA_TO_DEVICE);
344 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
345 dev_err(jrdev, "unable to map shared descriptor\n");
346 return -ENOMEM;
347 }
348#ifdef DEBUG
349 print_hex_dump(KERN_ERR,
350 "aead null enc shdesc@"__stringify(__LINE__)": ",
351 DUMP_PREFIX_ADDRESS, 16, 4, desc,
352 desc_bytes(desc), 1);
353#endif
354
355 /*
356 * Job Descriptor and Shared Descriptors
357 * must all fit into the 64-word Descriptor h/w Buffer
358 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500359 keys_fit_inline = false;
Horia Geantaae4a8252014-03-14 17:46:52 +0200360 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
361 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
362 keys_fit_inline = true;
363
364 desc = ctx->sh_desc_dec;
365
Herbert Xuf2147b82015-06-16 13:54:23 +0800366 /* old_aead_decrypt shared descriptor */
Horia Geantaae4a8252014-03-14 17:46:52 +0200367 init_sh_desc(desc, HDR_SHARE_SERIAL);
368
369 /* Skip if already shared */
370 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
371 JUMP_COND_SHRD);
372 if (keys_fit_inline)
373 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
374 ctx->split_key_len, CLASS_2 |
375 KEY_DEST_MDHA_SPLIT | KEY_ENC);
376 else
377 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
378 KEY_DEST_MDHA_SPLIT | KEY_ENC);
379 set_jump_tgt_here(desc, key_jump_cmd);
380
381 /* Class 2 operation */
382 append_operation(desc, ctx->class2_alg_type |
383 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
384
385 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
386 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Herbert Xuadd86d52015-05-11 17:47:50 +0800387 ctx->authsize + ivsize);
Horia Geantaae4a8252014-03-14 17:46:52 +0200388 /* assoclen = (assoclen + cryptlen) - cryptlen */
389 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
390 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
391
392 /* read assoc before reading payload */
393 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
394 KEY_VLF);
395
396 /* Prepare to read and write cryptlen bytes */
397 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
398 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
399
400 /*
401 * MOVE_LEN opcode is not available in all SEC HW revisions,
402 * thus need to do some magic, i.e. self-patch the descriptor
403 * buffer.
404 */
405 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
406 MOVE_DEST_MATH2 |
407 (0x6 << MOVE_LEN_SHIFT));
408 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
409 MOVE_DEST_DESCBUF |
410 MOVE_WAITCOMP |
411 (0x8 << MOVE_LEN_SHIFT));
412
413 /* Read and write cryptlen bytes */
414 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
415
416 /*
417 * Insert a NOP here, since we need at least 4 instructions between
418 * code patching the descriptor buffer and the location being patched.
419 */
420 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
421 set_jump_tgt_here(desc, jump_cmd);
422
423 set_move_tgt_here(desc, read_move_cmd);
424 set_move_tgt_here(desc, write_move_cmd);
425 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
426 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
427 MOVE_AUX_LS);
428 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
429
430 /* Load ICV */
431 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
432 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
433
434 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
435 desc_bytes(desc),
436 DMA_TO_DEVICE);
437 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
438 dev_err(jrdev, "unable to map shared descriptor\n");
439 return -ENOMEM;
440 }
441#ifdef DEBUG
442 print_hex_dump(KERN_ERR,
443 "aead null dec shdesc@"__stringify(__LINE__)": ",
444 DUMP_PREFIX_ADDRESS, 16, 4, desc,
445 desc_bytes(desc), 1);
446#endif
447
448 return 0;
449}
450
Yuan Kang1acebad2011-07-15 11:21:42 +0800451static int aead_set_sh_desc(struct crypto_aead *aead)
452{
Herbert Xuadd86d52015-05-11 17:47:50 +0800453 unsigned int ivsize = crypto_aead_ivsize(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +0800454 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200455 struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
456 const char *alg_name = crypto_tfm_alg_name(ctfm);
Yuan Kang1acebad2011-07-15 11:21:42 +0800457 struct device *jrdev = ctx->jrdev;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200458 bool keys_fit_inline;
Yuan Kang1acebad2011-07-15 11:21:42 +0800459 u32 geniv, moveiv;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200460 u32 ctx1_iv_off = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +0800461 u32 *desc;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200462 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
463 OP_ALG_AAI_CTR_MOD128);
464 const bool is_rfc3686 = (ctr_mode &&
465 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kang1acebad2011-07-15 11:21:42 +0800466
Horia Geantaae4a8252014-03-14 17:46:52 +0200467 if (!ctx->authsize)
Yuan Kang1acebad2011-07-15 11:21:42 +0800468 return 0;
469
Horia Geantaae4a8252014-03-14 17:46:52 +0200470 /* NULL encryption / decryption */
471 if (!ctx->enckeylen)
472 return aead_null_set_sh_desc(aead);
473
Yuan Kang1acebad2011-07-15 11:21:42 +0800474 /*
Catalin Vasiledaebc462014-10-31 12:45:37 +0200475 * AES-CTR needs to load IV in CONTEXT1 reg
476 * at an offset of 128bits (16bytes)
477 * CONTEXT1[255:128] = IV
478 */
479 if (ctr_mode)
480 ctx1_iv_off = 16;
481
482 /*
483 * RFC3686 specific:
484 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
485 */
486 if (is_rfc3686)
487 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
488
489 /*
Yuan Kang1acebad2011-07-15 11:21:42 +0800490 * Job Descriptor and Shared Descriptors
491 * must all fit into the 64-word Descriptor h/w Buffer
492 */
Catalin Vasiledaebc462014-10-31 12:45:37 +0200493 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800494 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200495 ctx->split_key_pad_len + ctx->enckeylen +
496 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad2011-07-15 11:21:42 +0800497 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800498 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800499
Herbert Xuf2147b82015-06-16 13:54:23 +0800500 /* old_aead_encrypt shared descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +0800501 desc = ctx->sh_desc_enc;
502
Catalin Vasiledaebc462014-10-31 12:45:37 +0200503 /* Note: Context registers are saved. */
504 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad2011-07-15 11:21:42 +0800505
506 /* Class 2 operation */
507 append_operation(desc, ctx->class2_alg_type |
508 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
509
510 /* cryptlen = seqoutlen - authsize */
511 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
512
513 /* assoclen + cryptlen = seqinlen - ivsize */
Herbert Xuadd86d52015-05-11 17:47:50 +0800514 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
Yuan Kang1acebad2011-07-15 11:21:42 +0800515
Horia Geanta4464a7d2014-03-14 17:46:49 +0200516 /* assoclen = (assoclen + cryptlen) - cryptlen */
Yuan Kang1acebad2011-07-15 11:21:42 +0800517 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
518
519 /* read assoc before reading payload */
520 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
521 KEY_VLF);
Herbert Xuadd86d52015-05-11 17:47:50 +0800522 aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200523
524 /* Load Counter into CONTEXT1 reg */
525 if (is_rfc3686)
526 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
527 LDST_CLASS_1_CCB |
528 LDST_SRCDST_BYTE_CONTEXT |
529 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
530 LDST_OFFSET_SHIFT));
Yuan Kang1acebad2011-07-15 11:21:42 +0800531
532 /* Class 1 operation */
533 append_operation(desc, ctx->class1_alg_type |
534 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
535
536 /* Read and write cryptlen bytes */
537 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
538 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
539 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
540
541 /* Write ICV */
542 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
543 LDST_SRCDST_BYTE_CONTEXT);
544
545 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
546 desc_bytes(desc),
547 DMA_TO_DEVICE);
548 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
549 dev_err(jrdev, "unable to map shared descriptor\n");
550 return -ENOMEM;
551 }
552#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300553 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800554 DUMP_PREFIX_ADDRESS, 16, 4, desc,
555 desc_bytes(desc), 1);
556#endif
557
558 /*
559 * Job Descriptor and Shared Descriptors
560 * must all fit into the 64-word Descriptor h/w Buffer
561 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500562 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800563 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200564 ctx->split_key_pad_len + ctx->enckeylen +
565 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad2011-07-15 11:21:42 +0800566 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800567 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800568
Herbert Xuf2147b82015-06-16 13:54:23 +0800569 /* old_aead_decrypt shared descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +0800570 desc = ctx->sh_desc_dec;
571
Catalin Vasiledaebc462014-10-31 12:45:37 +0200572 /* Note: Context registers are saved. */
573 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad2011-07-15 11:21:42 +0800574
575 /* Class 2 operation */
576 append_operation(desc, ctx->class2_alg_type |
577 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
578
Horia Geanta4464a7d2014-03-14 17:46:49 +0200579 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
Yuan Kang1acebad2011-07-15 11:21:42 +0800580 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Herbert Xuadd86d52015-05-11 17:47:50 +0800581 ctx->authsize + ivsize);
Yuan Kang1acebad2011-07-15 11:21:42 +0800582 /* assoclen = (assoclen + cryptlen) - cryptlen */
583 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
584 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
585
586 /* read assoc before reading payload */
587 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
588 KEY_VLF);
589
Herbert Xuadd86d52015-05-11 17:47:50 +0800590 aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
Yuan Kang1acebad2011-07-15 11:21:42 +0800591
Catalin Vasiledaebc462014-10-31 12:45:37 +0200592 /* Load Counter into CONTEXT1 reg */
593 if (is_rfc3686)
594 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
595 LDST_CLASS_1_CCB |
596 LDST_SRCDST_BYTE_CONTEXT |
597 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
598 LDST_OFFSET_SHIFT));
599
600 /* Choose operation */
601 if (ctr_mode)
602 append_operation(desc, ctx->class1_alg_type |
603 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
604 else
605 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kang1acebad2011-07-15 11:21:42 +0800606
607 /* Read and write cryptlen bytes */
608 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
609 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
610 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
611
612 /* Load ICV */
613 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
614 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
Yuan Kang1acebad2011-07-15 11:21:42 +0800615
616 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
617 desc_bytes(desc),
618 DMA_TO_DEVICE);
619 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
620 dev_err(jrdev, "unable to map shared descriptor\n");
621 return -ENOMEM;
622 }
623#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300624 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800625 DUMP_PREFIX_ADDRESS, 16, 4, desc,
626 desc_bytes(desc), 1);
627#endif
628
629 /*
630 * Job Descriptor and Shared Descriptors
631 * must all fit into the 64-word Descriptor h/w Buffer
632 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500633 keys_fit_inline = false;
Yuan Kang1acebad2011-07-15 11:21:42 +0800634 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200635 ctx->split_key_pad_len + ctx->enckeylen +
636 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad2011-07-15 11:21:42 +0800637 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800638 keys_fit_inline = true;
Yuan Kang1acebad2011-07-15 11:21:42 +0800639
640 /* aead_givencrypt shared descriptor */
641 desc = ctx->sh_desc_givenc;
642
Catalin Vasiledaebc462014-10-31 12:45:37 +0200643 /* Note: Context registers are saved. */
644 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad2011-07-15 11:21:42 +0800645
646 /* Generate IV */
647 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
648 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
Herbert Xuadd86d52015-05-11 17:47:50 +0800649 NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
Yuan Kang1acebad2011-07-15 11:21:42 +0800650 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
651 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
652 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200653 append_move(desc, MOVE_WAITCOMP |
654 MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
655 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
Herbert Xuadd86d52015-05-11 17:47:50 +0800656 (ivsize << MOVE_LEN_SHIFT));
Yuan Kang1acebad2011-07-15 11:21:42 +0800657 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
658
659 /* Copy IV to class 1 context */
Catalin Vasiledaebc462014-10-31 12:45:37 +0200660 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
661 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
Herbert Xuadd86d52015-05-11 17:47:50 +0800662 (ivsize << MOVE_LEN_SHIFT));
Yuan Kang1acebad2011-07-15 11:21:42 +0800663
664 /* Return to encryption */
665 append_operation(desc, ctx->class2_alg_type |
666 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
667
668 /* ivsize + cryptlen = seqoutlen - authsize */
669 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
670
671 /* assoclen = seqinlen - (ivsize + cryptlen) */
672 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
673
674 /* read assoc before reading payload */
675 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
676 KEY_VLF);
677
Catalin Vasiledaebc462014-10-31 12:45:37 +0200678 /* Copy iv from outfifo to class 2 fifo */
Yuan Kang1acebad2011-07-15 11:21:42 +0800679 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
Herbert Xuadd86d52015-05-11 17:47:50 +0800680 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
Yuan Kang1acebad2011-07-15 11:21:42 +0800681 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
682 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
Herbert Xuadd86d52015-05-11 17:47:50 +0800683 append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
Yuan Kang1acebad2011-07-15 11:21:42 +0800684 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
685
Catalin Vasiledaebc462014-10-31 12:45:37 +0200686 /* Load Counter into CONTEXT1 reg */
687 if (is_rfc3686)
688 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
689 LDST_CLASS_1_CCB |
690 LDST_SRCDST_BYTE_CONTEXT |
691 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
692 LDST_OFFSET_SHIFT));
693
Yuan Kang1acebad2011-07-15 11:21:42 +0800694 /* Class 1 operation */
695 append_operation(desc, ctx->class1_alg_type |
696 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
697
698 /* Will write ivsize + cryptlen */
699 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
700
701 /* Not need to reload iv */
Herbert Xuadd86d52015-05-11 17:47:50 +0800702 append_seq_fifo_load(desc, ivsize,
Yuan Kang1acebad2011-07-15 11:21:42 +0800703 FIFOLD_CLASS_SKIP);
704
705 /* Will read cryptlen */
706 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
707 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
708
709 /* Write ICV */
710 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
711 LDST_SRCDST_BYTE_CONTEXT);
712
713 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
714 desc_bytes(desc),
715 DMA_TO_DEVICE);
716 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
717 dev_err(jrdev, "unable to map shared descriptor\n");
718 return -ENOMEM;
719 }
720#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300721 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +0800722 DUMP_PREFIX_ADDRESS, 16, 4, desc,
723 desc_bytes(desc), 1);
724#endif
725
726 return 0;
727}
728
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
739
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300740static int gcm_set_sh_desc(struct crypto_aead *aead)
741{
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300742 struct caam_ctx *ctx = crypto_aead_ctx(aead);
743 struct device *jrdev = ctx->jrdev;
744 bool keys_fit_inline = false;
745 u32 *key_jump_cmd, *zero_payload_jump_cmd,
746 *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
747 u32 *desc;
748
749 if (!ctx->enckeylen || !ctx->authsize)
750 return 0;
751
752 /*
753 * AES GCM encrypt shared descriptor
754 * Job Descriptor and Shared Descriptor
755 * must fit into the 64-word Descriptor h/w Buffer
756 */
Herbert Xuf2147b82015-06-16 13:54:23 +0800757 if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300758 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
759 keys_fit_inline = true;
760
761 desc = ctx->sh_desc_enc;
762
763 init_sh_desc(desc, HDR_SHARE_SERIAL);
764
765 /* skip key loading if they are loaded due to sharing */
766 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
767 JUMP_COND_SHRD | JUMP_COND_SELF);
768 if (keys_fit_inline)
769 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
770 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
771 else
772 append_key(desc, ctx->key_dma, ctx->enckeylen,
773 CLASS_1 | KEY_DEST_CLASS_REG);
774 set_jump_tgt_here(desc, key_jump_cmd);
775
776 /* class 1 operation */
777 append_operation(desc, ctx->class1_alg_type |
778 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
779
Herbert Xuf2147b82015-06-16 13:54:23 +0800780 /* if assoclen + cryptlen is ZERO, skip to ICV write */
781 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
782 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
783 JUMP_COND_MATH_Z);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300784
785 /* if assoclen is ZERO, skip reading the assoc data */
Herbert Xuf2147b82015-06-16 13:54:23 +0800786 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300787 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
Herbert Xuf2147b82015-06-16 13:54:23 +0800788 JUMP_COND_MATH_Z);
789
790 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
791
792 /* skip assoc data */
793 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
794
795 /* cryptlen = seqinlen - assoclen */
796 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
797
798 /* if cryptlen is ZERO jump to zero-payload commands */
799 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
800 JUMP_COND_MATH_Z);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300801
802 /* read assoc data */
803 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
804 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
805 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
806
Herbert Xuf2147b82015-06-16 13:54:23 +0800807 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300808
809 /* write encrypted data */
810 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
811
812 /* read payload data */
813 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
814 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
815
816 /* jump the zero-payload commands */
Herbert Xuf2147b82015-06-16 13:54:23 +0800817 append_jump(desc, JUMP_TEST_ALL | 2);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300818
819 /* zero-payload commands */
820 set_jump_tgt_here(desc, zero_payload_jump_cmd);
821
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300822 /* read assoc data */
823 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
824 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
825
Herbert Xuf2147b82015-06-16 13:54:23 +0800826 /* There is no input data */
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300827 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300828
829 /* write ICV */
830 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
831 LDST_SRCDST_BYTE_CONTEXT);
832
833 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
834 desc_bytes(desc),
835 DMA_TO_DEVICE);
836 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
837 dev_err(jrdev, "unable to map shared descriptor\n");
838 return -ENOMEM;
839 }
840#ifdef DEBUG
841 print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
842 DUMP_PREFIX_ADDRESS, 16, 4, desc,
843 desc_bytes(desc), 1);
844#endif
845
846 /*
847 * Job Descriptor and Shared Descriptors
848 * must all fit into the 64-word Descriptor h/w Buffer
849 */
850 keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +0800851 if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300852 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
853 keys_fit_inline = true;
854
855 desc = ctx->sh_desc_dec;
856
857 init_sh_desc(desc, HDR_SHARE_SERIAL);
858
859 /* skip key loading if they are loaded due to sharing */
860 key_jump_cmd = append_jump(desc, JUMP_JSL |
861 JUMP_TEST_ALL | JUMP_COND_SHRD |
862 JUMP_COND_SELF);
863 if (keys_fit_inline)
864 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
865 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
866 else
867 append_key(desc, ctx->key_dma, ctx->enckeylen,
868 CLASS_1 | KEY_DEST_CLASS_REG);
869 set_jump_tgt_here(desc, key_jump_cmd);
870
871 /* class 1 operation */
872 append_operation(desc, ctx->class1_alg_type |
873 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
874
Herbert Xuf2147b82015-06-16 13:54:23 +0800875 /* if assoclen is ZERO, skip reading the assoc data */
876 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300877 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
Herbert Xuf2147b82015-06-16 13:54:23 +0800878 JUMP_COND_MATH_Z);
879
880 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
881
882 /* skip assoc data */
883 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
884
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300885 /* read assoc data */
886 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
887 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
Herbert Xuf2147b82015-06-16 13:54:23 +0800888
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300889 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
890
Herbert Xuf2147b82015-06-16 13:54:23 +0800891 /* cryptlen = seqoutlen - assoclen */
892 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
893
894 /* jump to zero-payload command if cryptlen is zero */
895 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
896 JUMP_COND_MATH_Z);
897
898 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300899
900 /* store encrypted data */
901 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
902
903 /* read payload data */
904 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
905 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
906
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300907 /* zero-payload command */
908 set_jump_tgt_here(desc, zero_payload_jump_cmd);
909
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300910 /* read ICV */
911 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
912 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
913
914 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
915 desc_bytes(desc),
916 DMA_TO_DEVICE);
917 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
918 dev_err(jrdev, "unable to map shared descriptor\n");
919 return -ENOMEM;
920 }
921#ifdef DEBUG
922 print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
923 DUMP_PREFIX_ADDRESS, 16, 4, desc,
924 desc_bytes(desc), 1);
925#endif
926
927 return 0;
928}
929
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
939
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300940static int rfc4106_set_sh_desc(struct crypto_aead *aead)
941{
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300942 struct caam_ctx *ctx = crypto_aead_ctx(aead);
943 struct device *jrdev = ctx->jrdev;
944 bool keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +0800945 u32 *key_jump_cmd;
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300946 u32 *desc;
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300947
948 if (!ctx->enckeylen || !ctx->authsize)
949 return 0;
950
951 /*
952 * RFC4106 encrypt shared descriptor
953 * Job Descriptor and Shared Descriptor
954 * must fit into the 64-word Descriptor h/w Buffer
955 */
Herbert Xuf2147b82015-06-16 13:54:23 +0800956 if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300957 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
958 keys_fit_inline = true;
959
960 desc = ctx->sh_desc_enc;
961
962 init_sh_desc(desc, HDR_SHARE_SERIAL);
963
964 /* Skip key loading if it is loaded due to sharing */
965 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
966 JUMP_COND_SHRD);
967 if (keys_fit_inline)
968 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
969 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
970 else
971 append_key(desc, ctx->key_dma, ctx->enckeylen,
972 CLASS_1 | KEY_DEST_CLASS_REG);
973 set_jump_tgt_here(desc, key_jump_cmd);
974
975 /* Class 1 operation */
976 append_operation(desc, ctx->class1_alg_type |
977 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
978
Herbert Xu46218752015-07-09 07:17:33 +0800979 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300980 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
981
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300982 /* Read assoc data */
983 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
984 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
985
Herbert Xu46218752015-07-09 07:17:33 +0800986 /* Skip IV */
987 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
Herbert Xuf2147b82015-06-16 13:54:23 +0800988
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300989 /* Will read cryptlen bytes */
Herbert Xuf2147b82015-06-16 13:54:23 +0800990 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300991
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300992 /* Read payload data */
993 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
994 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
995
Herbert Xu46218752015-07-09 07:17:33 +0800996 /* Skip assoc data */
997 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
998
999 /* cryptlen = seqoutlen - assoclen */
1000 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1001
1002 /* Write encrypted data */
1003 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1004
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001005 /* Write ICV */
1006 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1007 LDST_SRCDST_BYTE_CONTEXT);
1008
1009 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1010 desc_bytes(desc),
1011 DMA_TO_DEVICE);
1012 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1013 dev_err(jrdev, "unable to map shared descriptor\n");
1014 return -ENOMEM;
1015 }
1016#ifdef DEBUG
1017 print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1018 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1019 desc_bytes(desc), 1);
1020#endif
1021
1022 /*
1023 * Job Descriptor and Shared Descriptors
1024 * must all fit into the 64-word Descriptor h/w Buffer
1025 */
1026 keys_fit_inline = false;
1027 if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
1028 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1029 keys_fit_inline = true;
1030
1031 desc = ctx->sh_desc_dec;
1032
1033 init_sh_desc(desc, HDR_SHARE_SERIAL);
1034
1035 /* Skip key loading if it is loaded due to sharing */
1036 key_jump_cmd = append_jump(desc, JUMP_JSL |
1037 JUMP_TEST_ALL | JUMP_COND_SHRD);
1038 if (keys_fit_inline)
1039 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1040 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1041 else
1042 append_key(desc, ctx->key_dma, ctx->enckeylen,
1043 CLASS_1 | KEY_DEST_CLASS_REG);
1044 set_jump_tgt_here(desc, key_jump_cmd);
1045
1046 /* Class 1 operation */
1047 append_operation(desc, ctx->class1_alg_type |
1048 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1049
Herbert Xu46218752015-07-09 07:17:33 +08001050 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
Herbert Xuf2147b82015-06-16 13:54:23 +08001051 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001052
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001053 /* Read assoc data */
1054 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1055 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1056
Herbert Xu46218752015-07-09 07:17:33 +08001057 /* Skip IV */
1058 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
Herbert Xuf2147b82015-06-16 13:54:23 +08001059
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001060 /* Will read cryptlen bytes */
Herbert Xu46218752015-07-09 07:17:33 +08001061 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001062
1063 /* Read encrypted data */
1064 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1065 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1066
Herbert Xu46218752015-07-09 07:17:33 +08001067 /* Skip assoc data */
1068 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1069
1070 /* Will write cryptlen bytes */
1071 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1072
1073 /* Store payload data */
1074 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1075
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001076 /* Read ICV */
1077 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1078 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1079
1080 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1081 desc_bytes(desc),
1082 DMA_TO_DEVICE);
1083 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1084 dev_err(jrdev, "unable to map shared descriptor\n");
1085 return -ENOMEM;
1086 }
1087#ifdef DEBUG
1088 print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1089 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1090 desc_bytes(desc), 1);
1091#endif
1092
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001093 return 0;
1094}
1095
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
1106
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001107static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1108{
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001109 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1110 struct device *jrdev = ctx->jrdev;
1111 bool keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +08001112 u32 *key_jump_cmd;
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001113 u32 *read_move_cmd, *write_move_cmd;
1114 u32 *desc;
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001115
1116 if (!ctx->enckeylen || !ctx->authsize)
1117 return 0;
1118
1119 /*
1120 * RFC4543 encrypt shared descriptor
1121 * Job Descriptor and Shared Descriptor
1122 * must fit into the 64-word Descriptor h/w Buffer
1123 */
Herbert Xuf2147b82015-06-16 13:54:23 +08001124 if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001125 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1126 keys_fit_inline = true;
1127
1128 desc = ctx->sh_desc_enc;
1129
1130 init_sh_desc(desc, HDR_SHARE_SERIAL);
1131
1132 /* Skip key loading if it is loaded due to sharing */
1133 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1134 JUMP_COND_SHRD);
1135 if (keys_fit_inline)
1136 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1137 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1138 else
1139 append_key(desc, ctx->key_dma, ctx->enckeylen,
1140 CLASS_1 | KEY_DEST_CLASS_REG);
1141 set_jump_tgt_here(desc, key_jump_cmd);
1142
1143 /* Class 1 operation */
1144 append_operation(desc, ctx->class1_alg_type |
1145 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1146
Herbert Xuf2147b82015-06-16 13:54:23 +08001147 /* assoclen + cryptlen = seqinlen */
1148 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001149
1150 /*
1151 * MOVE_LEN opcode is not available in all SEC HW revisions,
1152 * thus need to do some magic, i.e. self-patch the descriptor
1153 * buffer.
1154 */
1155 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1156 (0x6 << MOVE_LEN_SHIFT));
1157 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1158 (0x8 << MOVE_LEN_SHIFT));
1159
Herbert Xuf2147b82015-06-16 13:54:23 +08001160 /* Will read assoclen + cryptlen bytes */
1161 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001162
Herbert Xuf2147b82015-06-16 13:54:23 +08001163 /* Will write assoclen + cryptlen bytes */
1164 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1165
1166 /* Read and write assoclen + cryptlen bytes */
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001167 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1168
1169 set_move_tgt_here(desc, read_move_cmd);
1170 set_move_tgt_here(desc, write_move_cmd);
1171 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1172 /* Move payload data to OFIFO */
1173 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1174
1175 /* Write ICV */
1176 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1177 LDST_SRCDST_BYTE_CONTEXT);
1178
1179 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1180 desc_bytes(desc),
1181 DMA_TO_DEVICE);
1182 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1183 dev_err(jrdev, "unable to map shared descriptor\n");
1184 return -ENOMEM;
1185 }
1186#ifdef DEBUG
1187 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1188 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1189 desc_bytes(desc), 1);
1190#endif
1191
1192 /*
1193 * Job Descriptor and Shared Descriptors
1194 * must all fit into the 64-word Descriptor h/w Buffer
1195 */
1196 keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +08001197 if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001198 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1199 keys_fit_inline = true;
1200
1201 desc = ctx->sh_desc_dec;
1202
1203 init_sh_desc(desc, HDR_SHARE_SERIAL);
1204
1205 /* Skip key loading if it is loaded due to sharing */
1206 key_jump_cmd = append_jump(desc, JUMP_JSL |
1207 JUMP_TEST_ALL | JUMP_COND_SHRD);
1208 if (keys_fit_inline)
1209 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1210 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1211 else
1212 append_key(desc, ctx->key_dma, ctx->enckeylen,
1213 CLASS_1 | KEY_DEST_CLASS_REG);
1214 set_jump_tgt_here(desc, key_jump_cmd);
1215
1216 /* Class 1 operation */
1217 append_operation(desc, ctx->class1_alg_type |
1218 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1219
Herbert Xuf2147b82015-06-16 13:54:23 +08001220 /* assoclen + cryptlen = seqoutlen */
1221 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001222
1223 /*
1224 * MOVE_LEN opcode is not available in all SEC HW revisions,
1225 * thus need to do some magic, i.e. self-patch the descriptor
1226 * buffer.
1227 */
1228 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1229 (0x6 << MOVE_LEN_SHIFT));
1230 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1231 (0x8 << MOVE_LEN_SHIFT));
1232
Herbert Xuf2147b82015-06-16 13:54:23 +08001233 /* Will read assoclen + cryptlen bytes */
1234 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001235
Herbert Xuf2147b82015-06-16 13:54:23 +08001236 /* Will write assoclen + cryptlen bytes */
1237 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001238
1239 /* Store payload data */
1240 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1241
Herbert Xuf2147b82015-06-16 13:54:23 +08001242 /* In-snoop assoclen + cryptlen data */
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001243 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1244 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1245
1246 set_move_tgt_here(desc, read_move_cmd);
1247 set_move_tgt_here(desc, write_move_cmd);
1248 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1249 /* Move payload data to OFIFO */
1250 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1251 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1252
1253 /* Read ICV */
1254 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1255 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1256
1257 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1258 desc_bytes(desc),
1259 DMA_TO_DEVICE);
1260 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1261 dev_err(jrdev, "unable to map shared descriptor\n");
1262 return -ENOMEM;
1263 }
1264#ifdef DEBUG
1265 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1266 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1267 desc_bytes(desc), 1);
1268#endif
1269
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001270 return 0;
1271}
1272
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
1291
Yuan Kang0e479302011-07-15 11:21:41 +08001292static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001293 const u8 *key, unsigned int keylen)
1294{
1295 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1296 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1297 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1298 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001299 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001300 int ret = 0;
1301
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001302 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001303 goto badkey;
1304
1305 /* Pick class 2 key length from algorithm submask */
1306 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1307 OP_ALG_ALGSEL_SHIFT] * 2;
1308 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1309
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001310 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1311 goto badkey;
1312
Kim Phillips8e8ec592011-03-13 16:54:26 +08001313#ifdef DEBUG
1314 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001315 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1316 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001317 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1318 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001319 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001320 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1321#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001322
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001323 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001324 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001325 goto badkey;
1326 }
1327
1328 /* postpend encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001329 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001330
Yuan Kang885e9e22011-07-15 11:21:41 +08001331 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001332 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001333 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001334 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001335 return -ENOMEM;
1336 }
1337#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001338 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001339 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001340 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001341#endif
1342
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001343 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001344
Yuan Kang1acebad2011-07-15 11:21:42 +08001345 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001346 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001347 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001348 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001349 }
1350
1351 return ret;
1352badkey:
1353 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1354 return -EINVAL;
1355}
1356
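/*
 * gcm_setkey() - plain AES-GCM needs no split key: the raw AES key is copied
 * into ctx->key, DMA mapped, and the GCM shared descriptors are regenerated.
 */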
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001357static int gcm_setkey(struct crypto_aead *aead,
1358 const u8 *key, unsigned int keylen)
1359{
1360 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1361 struct device *jrdev = ctx->jrdev;
1362 int ret = 0;
1363
1364#ifdef DEBUG
1365 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1366 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1367#endif
1368
1369 memcpy(ctx->key, key, keylen);
1370 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1371 DMA_TO_DEVICE);
1372 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1373 dev_err(jrdev, "unable to map key i/o memory\n");
1374 return -ENOMEM;
1375 }
1376 ctx->enckeylen = keylen;
1377
1378 ret = gcm_set_sh_desc(aead);
1379 if (ret) {
1380 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1381 DMA_TO_DEVICE);
1382 }
1383
1384 return ret;
1385}
1386
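/*
 * rfc4106_setkey() - the key material is the AES key followed by a 4-byte
 * salt, so at least 4 bytes are required. Only the AES key portion
 * (keylen - 4) is DMA mapped; the salt stays at ctx->key + ctx->enckeylen and
 * is later appended to the job descriptor as the first part of the nonce.
 */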
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001387static int rfc4106_setkey(struct crypto_aead *aead,
1388 const u8 *key, unsigned int keylen)
1389{
1390 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1391 struct device *jrdev = ctx->jrdev;
1392 int ret = 0;
1393
1394 if (keylen < 4)
1395 return -EINVAL;
1396
1397#ifdef DEBUG
1398 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1399 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1400#endif
1401
1402 memcpy(ctx->key, key, keylen);
1403
1404 /*
1405 * The last four bytes of the key material are used as the salt value
1406 * in the nonce. Update the AES key length.
1407 */
1408 ctx->enckeylen = keylen - 4;
1409
1410 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1411 DMA_TO_DEVICE);
1412 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1413 dev_err(jrdev, "unable to map key i/o memory\n");
1414 return -ENOMEM;
1415 }
1416
1417 ret = rfc4106_set_sh_desc(aead);
1418 if (ret) {
1419 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1420 DMA_TO_DEVICE);
1421 }
1422
1423 return ret;
1424}
1425
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001426static int rfc4543_setkey(struct crypto_aead *aead,
1427 const u8 *key, unsigned int keylen)
1428{
1429 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1430 struct device *jrdev = ctx->jrdev;
1431 int ret = 0;
1432
1433 if (keylen < 4)
1434 return -EINVAL;
1435
1436#ifdef DEBUG
1437 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1438 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1439#endif
1440
1441 memcpy(ctx->key, key, keylen);
1442
1443 /*
1444 * The last four bytes of the key material are used as the salt value
1445 * in the nonce. Update the AES key length.
1446 */
1447 ctx->enckeylen = keylen - 4;
1448
1449 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1450 DMA_TO_DEVICE);
1451 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1452 dev_err(jrdev, "unable to map key i/o memory\n");
1453 return -ENOMEM;
1454 }
1455
1456 ret = rfc4543_set_sh_desc(aead);
1457 if (ret) {
1458 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1459 DMA_TO_DEVICE);
1460 }
1461
1462 return ret;
1463}
1464
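/*
 * ablkcipher_setkey() - copy and map the cipher key, then build the three
 * shared descriptors (encrypt, decrypt, givencrypt). For CTR mode the IV
 * lives at a 16-byte offset in CONTEXT1; for RFC3686 the trailing nonce is
 * stripped from the key and loaded into CONTEXT1 ahead of the IV and counter.
 */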
Yuan Kangacdca312011-07-15 11:21:42 +08001465static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1466 const u8 *key, unsigned int keylen)
1467{
1468 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001469 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1470 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1471 const char *alg_name = crypto_tfm_alg_name(tfm);
Yuan Kangacdca312011-07-15 11:21:42 +08001472 struct device *jrdev = ctx->jrdev;
1473 int ret = 0;
Horia Geanta4464a7d2014-03-14 17:46:49 +02001474 u32 *key_jump_cmd;
Yuan Kangacdca312011-07-15 11:21:42 +08001475 u32 *desc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001476 u32 *nonce;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001477 u32 geniv;
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001478 u32 ctx1_iv_off = 0;
1479 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1480 OP_ALG_AAI_CTR_MOD128);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001481 const bool is_rfc3686 = (ctr_mode &&
1482 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kangacdca312011-07-15 11:21:42 +08001483
1484#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001485 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001486 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1487#endif
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001488 /*
 1489	 * AES-CTR needs to load the IV into the CONTEXT1 reg
 1490	 * at an offset of 128 bits (16 bytes):
1491 * CONTEXT1[255:128] = IV
1492 */
1493 if (ctr_mode)
1494 ctx1_iv_off = 16;
Yuan Kangacdca312011-07-15 11:21:42 +08001495
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001496 /*
1497 * RFC3686 specific:
1498 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1499 * | *key = {KEY, NONCE}
1500 */
1501 if (is_rfc3686) {
1502 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1503 keylen -= CTR_RFC3686_NONCE_SIZE;
1504 }
1505
Yuan Kangacdca312011-07-15 11:21:42 +08001506 memcpy(ctx->key, key, keylen);
1507 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1508 DMA_TO_DEVICE);
1509 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1510 dev_err(jrdev, "unable to map key i/o memory\n");
1511 return -ENOMEM;
1512 }
1513 ctx->enckeylen = keylen;
1514
1515 /* ablkcipher_encrypt shared descriptor */
1516 desc = ctx->sh_desc_enc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001517 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001518 /* Skip if already shared */
1519 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1520 JUMP_COND_SHRD);
1521
1522 /* Load class1 key only */
1523 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1524 ctx->enckeylen, CLASS_1 |
1525 KEY_DEST_CLASS_REG);
1526
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001527 /* Load nonce into CONTEXT1 reg */
1528 if (is_rfc3686) {
1529 nonce = (u32 *)(key + keylen);
1530 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1531 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1532 append_move(desc, MOVE_WAITCOMP |
1533 MOVE_SRC_OUTFIFO |
1534 MOVE_DEST_CLASS1CTX |
1535 (16 << MOVE_OFFSET_SHIFT) |
1536 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1537 }
1538
Yuan Kangacdca312011-07-15 11:21:42 +08001539 set_jump_tgt_here(desc, key_jump_cmd);
1540
Yuan Kangacdca312011-07-15 11:21:42 +08001541 /* Load iv */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001542 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001543 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001544
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001545 /* Load counter into CONTEXT1 reg */
1546 if (is_rfc3686)
1547 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1548 LDST_CLASS_1_CCB |
1549 LDST_SRCDST_BYTE_CONTEXT |
1550 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1551 LDST_OFFSET_SHIFT));
1552
Yuan Kangacdca312011-07-15 11:21:42 +08001553 /* Load operation */
1554 append_operation(desc, ctx->class1_alg_type |
1555 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1556
1557 /* Perform operation */
1558 ablkcipher_append_src_dst(desc);
1559
1560 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1561 desc_bytes(desc),
1562 DMA_TO_DEVICE);
1563 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1564 dev_err(jrdev, "unable to map shared descriptor\n");
1565 return -ENOMEM;
1566 }
1567#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001568 print_hex_dump(KERN_ERR,
1569 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001570 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1571 desc_bytes(desc), 1);
1572#endif
1573 /* ablkcipher_decrypt shared descriptor */
1574 desc = ctx->sh_desc_dec;
1575
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001576 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001577 /* Skip if already shared */
1578 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1579 JUMP_COND_SHRD);
1580
1581 /* Load class1 key only */
1582 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1583 ctx->enckeylen, CLASS_1 |
1584 KEY_DEST_CLASS_REG);
1585
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001586 /* Load nonce into CONTEXT1 reg */
1587 if (is_rfc3686) {
1588 nonce = (u32 *)(key + keylen);
1589 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1590 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1591 append_move(desc, MOVE_WAITCOMP |
1592 MOVE_SRC_OUTFIFO |
1593 MOVE_DEST_CLASS1CTX |
1594 (16 << MOVE_OFFSET_SHIFT) |
1595 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1596 }
1597
Yuan Kangacdca312011-07-15 11:21:42 +08001598 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kangacdca312011-07-15 11:21:42 +08001599
1600 /* load IV */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001601 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001602 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001603
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001604 /* Load counter into CONTEXT1 reg */
1605 if (is_rfc3686)
1606 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1607 LDST_CLASS_1_CCB |
1608 LDST_SRCDST_BYTE_CONTEXT |
1609 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1610 LDST_OFFSET_SHIFT));
1611
Yuan Kangacdca312011-07-15 11:21:42 +08001612 /* Choose operation */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001613 if (ctr_mode)
1614 append_operation(desc, ctx->class1_alg_type |
1615 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1616 else
1617 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kangacdca312011-07-15 11:21:42 +08001618
1619 /* Perform operation */
1620 ablkcipher_append_src_dst(desc);
1621
Yuan Kangacdca312011-07-15 11:21:42 +08001622 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1623 desc_bytes(desc),
1624 DMA_TO_DEVICE);
Horia Geanta71c65f72014-07-11 15:34:48 +03001625 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
Yuan Kangacdca312011-07-15 11:21:42 +08001626 dev_err(jrdev, "unable to map shared descriptor\n");
1627 return -ENOMEM;
1628 }
1629
1630#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001631 print_hex_dump(KERN_ERR,
1632 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001633 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1634 desc_bytes(desc), 1);
1635#endif
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001636 /* ablkcipher_givencrypt shared descriptor */
1637 desc = ctx->sh_desc_givenc;
1638
1639 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1640 /* Skip if already shared */
1641 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1642 JUMP_COND_SHRD);
1643
1644 /* Load class1 key only */
1645 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1646 ctx->enckeylen, CLASS_1 |
1647 KEY_DEST_CLASS_REG);
1648
1649 /* Load Nonce into CONTEXT1 reg */
1650 if (is_rfc3686) {
1651 nonce = (u32 *)(key + keylen);
1652 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1653 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1654 append_move(desc, MOVE_WAITCOMP |
1655 MOVE_SRC_OUTFIFO |
1656 MOVE_DEST_CLASS1CTX |
1657 (16 << MOVE_OFFSET_SHIFT) |
1658 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1659 }
1660 set_jump_tgt_here(desc, key_jump_cmd);
1661
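	/*
	 * Generate the IV in hardware: queue an info FIFO entry of pad type
	 * with PTYPE_RND so the DECO receives crt->ivsize random bytes, then
	 * MOVE them from the input FIFO into CONTEXT1 at ctx1_iv_off while
	 * automatic info FIFO entries are temporarily disabled.
	 */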
1662 /* Generate IV */
1663 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1664 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1665 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1666 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1667 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1668 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1669 append_move(desc, MOVE_WAITCOMP |
1670 MOVE_SRC_INFIFO |
1671 MOVE_DEST_CLASS1CTX |
1672 (crt->ivsize << MOVE_LEN_SHIFT) |
1673 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1674 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1675
1676 /* Copy generated IV to memory */
1677 append_seq_store(desc, crt->ivsize,
1678 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1679 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1680
1681 /* Load Counter into CONTEXT1 reg */
1682 if (is_rfc3686)
1683 append_load_imm_u32(desc, (u32)1, LDST_IMM |
1684 LDST_CLASS_1_CCB |
1685 LDST_SRCDST_BYTE_CONTEXT |
1686 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1687 LDST_OFFSET_SHIFT));
1688
1689 if (ctx1_iv_off)
1690 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1691 (1 << JUMP_OFFSET_SHIFT));
1692
1693 /* Load operation */
1694 append_operation(desc, ctx->class1_alg_type |
1695 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1696
1697 /* Perform operation */
1698 ablkcipher_append_src_dst(desc);
1699
1700 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1701 desc_bytes(desc),
1702 DMA_TO_DEVICE);
1703 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1704 dev_err(jrdev, "unable to map shared descriptor\n");
1705 return -ENOMEM;
1706 }
1707#ifdef DEBUG
1708 print_hex_dump(KERN_ERR,
1709 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1710 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1711 desc_bytes(desc), 1);
1712#endif
Yuan Kangacdca312011-07-15 11:21:42 +08001713
1714 return ret;
1715}
1716
Kim Phillips8e8ec592011-03-13 16:54:26 +08001717/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001718 * aead_edesc - s/w-extended aead descriptor
1719 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001720 * @assoc_chained: if associated data is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001721 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001722 * @src_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001723 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001724 * @dst_chained: if destination is chained
Yuan Kang1acebad2011-07-15 11:21:42 +08001725 * @iv_dma: dma address of iv for checking continuity and link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001726 * @sec4_sg_bytes: length of dma mapped sec4_sg space
Yuan Kanga299c832012-06-22 19:48:46 -05001727 * @sec4_sg_dma: bus physical mapped address of h/w link table
 1728 * @sec4_sg: pointer to h/w link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001729 * @hw_desc: the h/w job descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1730 */
Yuan Kang0e479302011-07-15 11:21:41 +08001731struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001732 int assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001733 bool assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001734 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001735 bool src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001736 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001737 bool dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001738 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001739 int sec4_sg_bytes;
1740 dma_addr_t sec4_sg_dma;
1741 struct sec4_sg_entry *sec4_sg;
Herbert Xuf2147b82015-06-16 13:54:23 +08001742 u32 hw_desc[];
Kim Phillips8e8ec592011-03-13 16:54:26 +08001743};
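/*
 * An aead_edesc is carved out of a single allocation: the struct itself,
 * immediately followed by the h/w job descriptor (desc_bytes) and then the
 * sec4_sg link table, as set up in the *_edesc_alloc() routines below.
 */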
1744
Yuan Kangacdca312011-07-15 11:21:42 +08001745/*
1746 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1747 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001748 * @src_chained: if source is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001749 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001750 * @dst_chained: if destination is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001751 * @iv_dma: dma address of iv for checking continuity and link table
 1752 * @sec4_sg_bytes: length of dma mapped sec4_sg space
Yuan Kanga299c832012-06-22 19:48:46 -05001753 * @sec4_sg_dma: bus physical mapped address of h/w link table
 1754 * @sec4_sg: pointer to h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +08001755 * @hw_desc: the h/w job descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1756 */
1757struct ablkcipher_edesc {
1758 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001759 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001760 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001761 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001762 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001763 int sec4_sg_bytes;
1764 dma_addr_t sec4_sg_dma;
1765 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001766 u32 hw_desc[0];
1767};
1768
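/*
 * caam_unmap() - common DMA cleanup: unmap the source (and, for out-of-place
 * requests, the destination) scatterlists, the IV buffer if one was mapped,
 * and the sec4_sg link table.
 */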
Yuan Kang1acebad2011-07-15 11:21:42 +08001769static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -05001770 struct scatterlist *dst, int src_nents,
1771 bool src_chained, int dst_nents, bool dst_chained,
Yuan Kanga299c832012-06-22 19:48:46 -05001772 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1773 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001774{
Yuan Kang643b39b2012-06-22 19:48:49 -05001775 if (dst != src) {
1776 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1777 src_chained);
1778 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1779 dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001780 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001781 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1782 DMA_BIDIRECTIONAL, src_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001783 }
1784
Yuan Kang1acebad2011-07-15 11:21:42 +08001785 if (iv_dma)
1786 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -05001787 if (sec4_sg_bytes)
1788 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001789 DMA_TO_DEVICE);
1790}
1791
Yuan Kang1acebad2011-07-15 11:21:42 +08001792static void aead_unmap(struct device *dev,
1793 struct aead_edesc *edesc,
1794 struct aead_request *req)
1795{
Herbert Xuf2147b82015-06-16 13:54:23 +08001796 caam_unmap(dev, req->src, req->dst,
1797 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1798 edesc->dst_chained, 0, 0,
1799 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1800}
1801
1802static void old_aead_unmap(struct device *dev,
1803 struct aead_edesc *edesc,
1804 struct aead_request *req)
1805{
Yuan Kang1acebad2011-07-15 11:21:42 +08001806 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1807 int ivsize = crypto_aead_ivsize(aead);
1808
Yuan Kang643b39b2012-06-22 19:48:49 -05001809 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
1810 DMA_TO_DEVICE, edesc->assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001811
1812 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001813 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1814 edesc->dst_chained, edesc->iv_dma, ivsize,
1815 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kang1acebad2011-07-15 11:21:42 +08001816}
1817
Yuan Kangacdca312011-07-15 11:21:42 +08001818static void ablkcipher_unmap(struct device *dev,
1819 struct ablkcipher_edesc *edesc,
1820 struct ablkcipher_request *req)
1821{
1822 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1823 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1824
1825 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001826 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1827 edesc->dst_chained, edesc->iv_dma, ivsize,
1828 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +08001829}
1830
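/*
 * Job completion callbacks: invoked by the job ring driver once the CAAM has
 * executed the descriptor. They translate the h/w status word, release the
 * DMA mappings and the extended descriptor, and complete the crypto request.
 */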
Yuan Kang0e479302011-07-15 11:21:41 +08001831static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001832 void *context)
1833{
Yuan Kang0e479302011-07-15 11:21:41 +08001834 struct aead_request *req = context;
1835 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001836
1837#ifdef DEBUG
1838 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1839#endif
1840
1841 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1842
1843 if (err)
1844 caam_jr_strstatus(jrdev, err);
1845
1846 aead_unmap(jrdev, edesc, req);
1847
1848 kfree(edesc);
1849
1850 aead_request_complete(req, err);
1851}
1852
1853static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1854 void *context)
1855{
1856 struct aead_request *req = context;
1857 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001858#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001859 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001860 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08001861 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001862
1863 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1864#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001865
Yuan Kang0e479302011-07-15 11:21:41 +08001866 edesc = (struct aead_edesc *)((char *)desc -
1867 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001868
Marek Vasutfa9659c2014-04-24 20:05:12 +02001869 if (err)
1870 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001871
Herbert Xuf2147b82015-06-16 13:54:23 +08001872 old_aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001873
1874#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001875 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001876 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 1877		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001878 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001879 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001880 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001881 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001882 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1883 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001884 ctx->authsize + 4, 1);
1885#endif
1886
1887 kfree(edesc);
1888
Yuan Kang0e479302011-07-15 11:21:41 +08001889 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001890}
1891
Yuan Kang0e479302011-07-15 11:21:41 +08001892static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001893 void *context)
1894{
Yuan Kang0e479302011-07-15 11:21:41 +08001895 struct aead_request *req = context;
1896 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001897
1898#ifdef DEBUG
1899 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1900#endif
1901
1902 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1903
1904 if (err)
1905 caam_jr_strstatus(jrdev, err);
1906
1907 aead_unmap(jrdev, edesc, req);
1908
1909 /*
 1910	 * verify that the hw auth check passed; otherwise return -EBADMSG
1911 */
1912 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1913 err = -EBADMSG;
1914
1915 kfree(edesc);
1916
1917 aead_request_complete(req, err);
1918}
1919
1920static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1921 void *context)
1922{
1923 struct aead_request *req = context;
1924 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001925#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001926 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001927 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08001928 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001929
1930 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1931#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001932
Yuan Kang0e479302011-07-15 11:21:41 +08001933 edesc = (struct aead_edesc *)((char *)desc -
1934 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001935
Yuan Kang1acebad2011-07-15 11:21:42 +08001936#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001937 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001938 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1939 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001940 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001941 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +02001942 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08001943#endif
1944
Marek Vasutfa9659c2014-04-24 20:05:12 +02001945 if (err)
1946 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001947
Herbert Xuf2147b82015-06-16 13:54:23 +08001948 old_aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001949
1950 /*
 1951	 * verify that the hw auth check passed; otherwise return -EBADMSG
1952 */
1953 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1954 err = -EBADMSG;
1955
1956#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001957 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001958 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +08001959 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
1960 sizeof(struct iphdr) + req->assoclen +
1961 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001962 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -05001963 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +08001964 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +03001965 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001966 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1967 sg->length + ctx->authsize + 16, 1);
1968 }
1969#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001970
Kim Phillips8e8ec592011-03-13 16:54:26 +08001971 kfree(edesc);
1972
Yuan Kang0e479302011-07-15 11:21:41 +08001973 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001974}
1975
Yuan Kangacdca312011-07-15 11:21:42 +08001976static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1977 void *context)
1978{
1979 struct ablkcipher_request *req = context;
1980 struct ablkcipher_edesc *edesc;
1981#ifdef DEBUG
1982 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1983 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1984
1985 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1986#endif
1987
1988 edesc = (struct ablkcipher_edesc *)((char *)desc -
1989 offsetof(struct ablkcipher_edesc, hw_desc));
1990
Marek Vasutfa9659c2014-04-24 20:05:12 +02001991 if (err)
1992 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001993
1994#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001995 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001996 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1997 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001998 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001999 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2000 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2001#endif
2002
2003 ablkcipher_unmap(jrdev, edesc, req);
2004 kfree(edesc);
2005
2006 ablkcipher_request_complete(req, err);
2007}
2008
2009static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2010 void *context)
2011{
2012 struct ablkcipher_request *req = context;
2013 struct ablkcipher_edesc *edesc;
2014#ifdef DEBUG
2015 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2016 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2017
2018 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2019#endif
2020
2021 edesc = (struct ablkcipher_edesc *)((char *)desc -
2022 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02002023 if (err)
2024 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002025
2026#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002027 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002028 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2029 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002030 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002031 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2032 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2033#endif
2034
2035 ablkcipher_unmap(jrdev, edesc, req);
2036 kfree(edesc);
2037
2038 ablkcipher_request_complete(req, err);
2039}
2040
Kim Phillips8e8ec592011-03-13 16:54:26 +08002041/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002042 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002043 */
Herbert Xuf2147b82015-06-16 13:54:23 +08002044static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2045 struct aead_edesc *edesc,
2046 struct aead_request *req,
2047 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002048{
Yuan Kang0e479302011-07-15 11:21:41 +08002049 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002050 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002051 int ivsize = crypto_aead_ivsize(aead);
2052 int authsize = ctx->authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +08002053 u32 *desc = edesc->hw_desc;
2054 u32 out_options = 0, in_options;
2055 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002056 int len, sec4_sg_index = 0;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002057 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002058
Yuan Kang1acebad2011-07-15 11:21:42 +08002059#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08002060 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08002061 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002062 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002063 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 2064		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002065 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002066 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002067 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002068 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002069 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad2011-07-15 11:21:42 +08002070 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002071 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002072 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2073 desc_bytes(sh_desc), 1);
2074#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002075
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002076 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2077 OP_ALG_ALGSEL_AES) &&
2078 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2079 is_gcm = true;
2080
Yuan Kang1acebad2011-07-15 11:21:42 +08002081 len = desc_len(sh_desc);
2082 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2083
2084 if (all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002085 if (is_gcm)
2086 src_dma = edesc->iv_dma;
2087 else
2088 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002089 in_options = 0;
2090 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002091 src_dma = edesc->sec4_sg_dma;
2092 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2093 (edesc->src_nents ? : 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002094 in_options = LDST_SGF;
2095 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002096
2097 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2098 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002099
Yuan Kang1acebad2011-07-15 11:21:42 +08002100 if (likely(req->src == req->dst)) {
2101 if (all_contig) {
2102 dst_dma = sg_dma_address(req->src);
2103 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002104 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08002105 ((edesc->assoc_nents ? : 1) + 1);
2106 out_options = LDST_SGF;
2107 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002108 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002109 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08002110 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002111 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002112 dst_dma = edesc->sec4_sg_dma +
2113 sec4_sg_index *
2114 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002115 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002116 }
2117 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002118 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02002119 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2120 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002121 else
Yuan Kang1acebad2011-07-15 11:21:42 +08002122 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2123 out_options);
2124}
2125
2126/*
Herbert Xuf2147b82015-06-16 13:54:23 +08002127 * Fill in aead job descriptor
2128 */
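/*
 * Unlike old_init_aead_job() above, this variant serves the new AEAD API:
 * req->src already covers assoclen + cryptlen, the shared descriptor is
 * picked from the context based on the direction, and assoclen is passed to
 * it through math register REG3.
 */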
2129static void init_aead_job(struct aead_request *req,
2130 struct aead_edesc *edesc,
2131 bool all_contig, bool encrypt)
2132{
2133 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2134 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2135 int authsize = ctx->authsize;
2136 u32 *desc = edesc->hw_desc;
2137 u32 out_options, in_options;
2138 dma_addr_t dst_dma, src_dma;
2139 int len, sec4_sg_index = 0;
2140 dma_addr_t ptr;
2141 u32 *sh_desc;
2142
2143 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2144 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2145
2146 len = desc_len(sh_desc);
2147 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2148
2149 if (all_contig) {
2150 src_dma = sg_dma_address(req->src);
2151 in_options = 0;
2152 } else {
2153 src_dma = edesc->sec4_sg_dma;
2154 sec4_sg_index += edesc->src_nents;
2155 in_options = LDST_SGF;
2156 }
2157
2158 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2159 in_options);
2160
2161 dst_dma = src_dma;
2162 out_options = in_options;
2163
2164 if (unlikely(req->src != req->dst)) {
2165 if (!edesc->dst_nents) {
2166 dst_dma = sg_dma_address(req->dst);
2167 } else {
2168 dst_dma = edesc->sec4_sg_dma +
2169 sec4_sg_index *
2170 sizeof(struct sec4_sg_entry);
2171 out_options = LDST_SGF;
2172 }
2173 }
2174
2175 if (encrypt)
2176 append_seq_out_ptr(desc, dst_dma,
2177 req->assoclen + req->cryptlen + authsize,
2178 out_options);
2179 else
2180 append_seq_out_ptr(desc, dst_dma,
2181 req->assoclen + req->cryptlen - authsize,
2182 out_options);
2183
2184 /* REG3 = assoclen */
2185 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2186}
2187
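/*
 * init_gcm_job() - extends init_aead_job() by feeding the GCM IV to class 1
 * as immediate FIFO data: the 4-byte salt from the end of ctx->key first for
 * non-generic GCM (8-byte IVs), then req->iv. FIFOLD_TYPE_LAST1 is set only
 * when a generic-GCM encrypt request carries neither AAD nor payload.
 */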
2188static void init_gcm_job(struct aead_request *req,
2189 struct aead_edesc *edesc,
2190 bool all_contig, bool encrypt)
2191{
2192 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2193 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2194 unsigned int ivsize = crypto_aead_ivsize(aead);
2195 u32 *desc = edesc->hw_desc;
2196 bool generic_gcm = (ivsize == 12);
2197 unsigned int last;
2198
2199 init_aead_job(req, edesc, all_contig, encrypt);
2200
2201 /* BUG This should not be specific to generic GCM. */
2202 last = 0;
2203 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2204 last = FIFOLD_TYPE_LAST1;
2205
2206 /* Read GCM IV */
2207 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2208 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2209 /* Append Salt */
2210 if (!generic_gcm)
2211 append_data(desc, ctx->key + ctx->enckeylen, 4);
2212 /* Append IV */
2213 append_data(desc, req->iv, ivsize);
2214 /* End of blank commands */
2215}
2216
2217/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002218 * Fill in aead givencrypt job descriptor
2219 */
2220static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2221 struct aead_edesc *edesc,
2222 struct aead_request *req,
2223 int contig)
2224{
2225 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2226 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2227 int ivsize = crypto_aead_ivsize(aead);
2228 int authsize = ctx->authsize;
2229 u32 *desc = edesc->hw_desc;
2230 u32 out_options = 0, in_options;
2231 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002232 int len, sec4_sg_index = 0;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002233 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002234
2235#ifdef DEBUG
Yuan Kang1acebad2011-07-15 11:21:42 +08002236 debug("assoclen %d cryptlen %d authsize %d\n",
2237 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002238 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002239 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 2240		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002241 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002242 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002243 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002244 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2245 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002246 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002247 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2248 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002249#endif
2250
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002251 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2252 OP_ALG_ALGSEL_AES) &&
2253 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2254 is_gcm = true;
2255
Yuan Kang1acebad2011-07-15 11:21:42 +08002256 len = desc_len(sh_desc);
2257 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2258
2259 if (contig & GIV_SRC_CONTIG) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002260 if (is_gcm)
2261 src_dma = edesc->iv_dma;
2262 else
2263 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002264 in_options = 0;
2265 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002266 src_dma = edesc->sec4_sg_dma;
2267 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002268 in_options = LDST_SGF;
2269 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002270 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2271 in_options);
Yuan Kang1acebad2011-07-15 11:21:42 +08002272
2273 if (contig & GIV_DST_CONTIG) {
2274 dst_dma = edesc->iv_dma;
2275 } else {
2276 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05002277 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002278 (edesc->assoc_nents +
2279 (is_gcm ? 1 + edesc->src_nents : 0));
Yuan Kang1acebad2011-07-15 11:21:42 +08002280 out_options = LDST_SGF;
2281 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002282 dst_dma = edesc->sec4_sg_dma +
2283 sec4_sg_index *
2284 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002285 out_options = LDST_SGF;
2286 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002287 }
2288
Horia Geantabbf9c892013-11-28 15:11:16 +02002289 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2290 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002291}
2292
2293/*
Yuan Kangacdca312011-07-15 11:21:42 +08002294 * Fill in ablkcipher job descriptor
2295 */
2296static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2297 struct ablkcipher_edesc *edesc,
2298 struct ablkcipher_request *req,
2299 bool iv_contig)
2300{
2301 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2302 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2303 u32 *desc = edesc->hw_desc;
2304 u32 out_options = 0, in_options;
2305 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002306 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002307
2308#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002309 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002310 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2311 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002312 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002313 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2314 edesc->src_nents ? 100 : req->nbytes, 1);
2315#endif
2316
2317 len = desc_len(sh_desc);
2318 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2319
2320 if (iv_contig) {
2321 src_dma = edesc->iv_dma;
2322 in_options = 0;
2323 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002324 src_dma = edesc->sec4_sg_dma;
Cristian Stoica35b82e52015-01-21 11:53:30 +02002325 sec4_sg_index += edesc->src_nents + 1;
Yuan Kangacdca312011-07-15 11:21:42 +08002326 in_options = LDST_SGF;
2327 }
2328 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2329
2330 if (likely(req->src == req->dst)) {
2331 if (!edesc->src_nents && iv_contig) {
2332 dst_dma = sg_dma_address(req->src);
2333 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002334 dst_dma = edesc->sec4_sg_dma +
2335 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002336 out_options = LDST_SGF;
2337 }
2338 } else {
2339 if (!edesc->dst_nents) {
2340 dst_dma = sg_dma_address(req->dst);
2341 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002342 dst_dma = edesc->sec4_sg_dma +
2343 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002344 out_options = LDST_SGF;
2345 }
2346 }
2347 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2348}
2349
2350/*
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002351 * Fill in ablkcipher givencrypt job descriptor
2352 */
2353static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2354 struct ablkcipher_edesc *edesc,
2355 struct ablkcipher_request *req,
2356 bool iv_contig)
2357{
2358 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2359 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2360 u32 *desc = edesc->hw_desc;
2361 u32 out_options, in_options;
2362 dma_addr_t dst_dma, src_dma;
2363 int len, sec4_sg_index = 0;
2364
2365#ifdef DEBUG
2366 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2367 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2368 ivsize, 1);
2369 print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2370 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2371 edesc->src_nents ? 100 : req->nbytes, 1);
2372#endif
2373
2374 len = desc_len(sh_desc);
2375 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2376
2377 if (!edesc->src_nents) {
2378 src_dma = sg_dma_address(req->src);
2379 in_options = 0;
2380 } else {
2381 src_dma = edesc->sec4_sg_dma;
2382 sec4_sg_index += edesc->src_nents;
2383 in_options = LDST_SGF;
2384 }
2385 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2386
2387 if (iv_contig) {
2388 dst_dma = edesc->iv_dma;
2389 out_options = 0;
2390 } else {
2391 dst_dma = edesc->sec4_sg_dma +
2392 sec4_sg_index * sizeof(struct sec4_sg_entry);
2393 out_options = LDST_SGF;
2394 }
2395 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2396}
2397
2398/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002399 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002400 */
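/*
 * Legacy AEAD path: assoc, IV and payload may each be scattered. When they
 * are not one contiguous DMA region, a sec4_sg table is built in the order
 * assoc, IV, src, dst (IV before assoc for GCM, matching the expected input
 * sequence) and DMA mapped for the CAAM.
 */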
Herbert Xuf2147b82015-06-16 13:54:23 +08002401static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
2402 int desc_bytes,
2403 bool *all_contig_ptr,
2404 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002405{
Yuan Kang0e479302011-07-15 11:21:41 +08002406 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002407 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2408 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002409 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2410 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2411 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002412 struct aead_edesc *edesc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002413 dma_addr_t iv_dma = 0;
2414 int sgc;
2415 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05002416 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad2011-07-15 11:21:42 +08002417 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05002418 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02002419 unsigned int authsize = ctx->authsize;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002420 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002421
Yuan Kang643b39b2012-06-22 19:48:49 -05002422 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002423
Horia Geantabbf9c892013-11-28 15:11:16 +02002424 if (unlikely(req->dst != req->src)) {
2425 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2426 dst_nents = sg_count(req->dst,
2427 req->cryptlen +
2428 (encrypt ? authsize : (-authsize)),
2429 &dst_chained);
2430 } else {
2431 src_nents = sg_count(req->src,
2432 req->cryptlen +
2433 (encrypt ? authsize : 0),
2434 &src_chained);
2435 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002436
Yuan Kang643b39b2012-06-22 19:48:49 -05002437 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002438 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002439 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002440 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2441 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002442 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002443 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2444 DMA_TO_DEVICE, src_chained);
2445 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2446 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002447 }
2448
Yuan Kang1acebad2011-07-15 11:21:42 +08002449 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002450 if (dma_mapping_error(jrdev, iv_dma)) {
2451 dev_err(jrdev, "unable to map IV\n");
2452 return ERR_PTR(-ENOMEM);
2453 }
2454
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002455 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2456 OP_ALG_ALGSEL_AES) &&
2457 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2458 is_gcm = true;
2459
2460 /*
2461 * Check if data are contiguous.
2462 * GCM expected input sequence: IV, AAD, text
 2463	 * All others: expected input sequence is AAD, IV, text
2464 */
2465 if (is_gcm)
2466 all_contig = (!assoc_nents &&
2467 iv_dma + ivsize == sg_dma_address(req->assoc) &&
2468 !src_nents && sg_dma_address(req->assoc) +
2469 req->assoclen == sg_dma_address(req->src));
2470 else
2471 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2472 req->assoclen == iv_dma && !src_nents &&
2473 iv_dma + ivsize == sg_dma_address(req->src));
2474 if (!all_contig) {
Yuan Kang1acebad2011-07-15 11:21:42 +08002475 assoc_nents = assoc_nents ? : 1;
2476 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002477 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002478 }
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002479
Yuan Kanga299c832012-06-22 19:48:46 -05002480 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002481
Yuan Kanga299c832012-06-22 19:48:46 -05002482 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002483
2484 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08002485 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002486 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002487 if (!edesc) {
2488 dev_err(jrdev, "could not allocate extended descriptor\n");
2489 return ERR_PTR(-ENOMEM);
2490 }
2491
2492 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002493 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002494 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002495 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002496 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002497 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002498 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002499 edesc->sec4_sg_bytes = sec4_sg_bytes;
2500 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2501 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002502 *all_contig_ptr = all_contig;
2503
Yuan Kanga299c832012-06-22 19:48:46 -05002504 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002505 if (!all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002506 if (!is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002507 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2508 edesc->sec4_sg + sec4_sg_index);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002509 sec4_sg_index += assoc_nents;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002510 }
2511
Yuan Kanga299c832012-06-22 19:48:46 -05002512 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002513 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002514 sec4_sg_index += 1;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002515
2516 if (is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002517 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2518 edesc->sec4_sg + sec4_sg_index);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002519 sec4_sg_index += assoc_nents;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002520 }
2521
Yuan Kanga299c832012-06-22 19:48:46 -05002522 sg_to_sec4_sg_last(req->src,
Cristian Stoica35b82e52015-01-21 11:53:30 +02002523 src_nents,
Yuan Kanga299c832012-06-22 19:48:46 -05002524 edesc->sec4_sg +
2525 sec4_sg_index, 0);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002526 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002527 }
2528 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002529 sg_to_sec4_sg_last(req->dst, dst_nents,
2530 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002531 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302532 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2533 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002534 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2535 dev_err(jrdev, "unable to map S/G table\n");
2536 return ERR_PTR(-ENOMEM);
2537 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002538
2539 return edesc;
2540}
2541
Herbert Xuf2147b82015-06-16 13:54:23 +08002542/*
2543 * allocate and map the aead extended descriptor
2544 */
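/*
 * New AEAD API path: req->src already contains the associated data and the
 * payload, so only src (and dst for out-of-place requests) contribute
 * sec4_sg entries; the table is mapped only when at least one of the lists
 * is actually scattered.
 */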
2545static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2546 int desc_bytes, bool *all_contig_ptr,
2547 bool encrypt)
2548{
2549 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2550 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2551 struct device *jrdev = ctx->jrdev;
2552 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2553 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2554 int src_nents, dst_nents = 0;
2555 struct aead_edesc *edesc;
2556 int sgc;
2557 bool all_contig = true;
2558 bool src_chained = false, dst_chained = false;
2559 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2560 unsigned int authsize = ctx->authsize;
2561
2562 if (unlikely(req->dst != req->src)) {
2563 src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
2564 &src_chained);
2565 dst_nents = sg_count(req->dst,
2566 req->assoclen + req->cryptlen +
2567 (encrypt ? authsize : (-authsize)),
2568 &dst_chained);
2569 } else {
2570 src_nents = sg_count(req->src,
2571 req->assoclen + req->cryptlen +
2572 (encrypt ? authsize : 0),
2573 &src_chained);
2574 }
2575
2576 /* Check if data are contiguous. */
2577 all_contig = !src_nents;
2578 if (!all_contig) {
2579 src_nents = src_nents ? : 1;
2580 sec4_sg_len = src_nents;
2581 }
2582
2583 sec4_sg_len += dst_nents;
2584
2585 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2586
2587 /* allocate space for base edesc and hw desc commands, link tables */
2588 edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
2589 sec4_sg_bytes, GFP_DMA | flags);
2590 if (!edesc) {
2591 dev_err(jrdev, "could not allocate extended descriptor\n");
2592 return ERR_PTR(-ENOMEM);
2593 }
2594
2595 if (likely(req->src == req->dst)) {
2596 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2597 DMA_BIDIRECTIONAL, src_chained);
2598 if (unlikely(!sgc)) {
2599 dev_err(jrdev, "unable to map source\n");
2600 kfree(edesc);
2601 return ERR_PTR(-ENOMEM);
2602 }
2603 } else {
2604 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2605 DMA_TO_DEVICE, src_chained);
2606 if (unlikely(!sgc)) {
2607 dev_err(jrdev, "unable to map source\n");
2608 kfree(edesc);
2609 return ERR_PTR(-ENOMEM);
2610 }
2611
2612 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2613 DMA_FROM_DEVICE, dst_chained);
2614 if (unlikely(!sgc)) {
2615 dev_err(jrdev, "unable to map destination\n");
2616 dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
2617 DMA_TO_DEVICE, src_chained);
2618 kfree(edesc);
2619 return ERR_PTR(-ENOMEM);
2620 }
2621 }
2622
2623 edesc->src_nents = src_nents;
2624 edesc->src_chained = src_chained;
2625 edesc->dst_nents = dst_nents;
2626 edesc->dst_chained = dst_chained;
2627 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2628 desc_bytes;
2629 *all_contig_ptr = all_contig;
2630
2631 sec4_sg_index = 0;
2632 if (!all_contig) {
Herbert Xu7793bda2015-06-18 14:25:56 +08002633 sg_to_sec4_sg_last(req->src, src_nents,
Herbert Xuf2147b82015-06-16 13:54:23 +08002634 edesc->sec4_sg + sec4_sg_index, 0);
2635 sec4_sg_index += src_nents;
2636 }
2637 if (dst_nents) {
2638 sg_to_sec4_sg_last(req->dst, dst_nents,
2639 edesc->sec4_sg + sec4_sg_index, 0);
2640 }
2641
2642 if (!sec4_sg_bytes)
2643 return edesc;
2644
2645 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2646 sec4_sg_bytes, DMA_TO_DEVICE);
2647 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2648 dev_err(jrdev, "unable to map S/G table\n");
2649 aead_unmap(jrdev, edesc, req);
2650 kfree(edesc);
2651 return ERR_PTR(-ENOMEM);
2652 }
2653
2654 edesc->sec4_sg_bytes = sec4_sg_bytes;
2655
2656 return edesc;
2657}
2658
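/*
 * gcm_encrypt() - allocate the extended descriptor, emit a job descriptor
 * that reuses the GCM shared descriptor, and enqueue it on the job ring;
 * -EINPROGRESS is returned on a successful submission and the request is
 * completed from aead_encrypt_done().
 */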
2659static int gcm_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002660{
Yuan Kang0e479302011-07-15 11:21:41 +08002661 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002662 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002663 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2664 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002665 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002666 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002667 int ret = 0;
2668
Kim Phillips8e8ec592011-03-13 16:54:26 +08002669 /* allocate extended descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002670 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002671 if (IS_ERR(edesc))
2672 return PTR_ERR(edesc);
2673
Yuan Kang1acebad2011-07-15 11:21:42 +08002674 /* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002675 init_gcm_job(req, edesc, all_contig, true);
Yuan Kang1acebad2011-07-15 11:21:42 +08002676#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002677 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002678 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2679 desc_bytes(edesc->hw_desc), 1);
2680#endif
2681
Kim Phillips8e8ec592011-03-13 16:54:26 +08002682 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002683 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2684 if (!ret) {
2685 ret = -EINPROGRESS;
2686 } else {
2687 aead_unmap(jrdev, edesc, req);
2688 kfree(edesc);
2689 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002690
Yuan Kang1acebad2011-07-15 11:21:42 +08002691 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002692}
2693
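/*
 * IPsec ESP variants (rfc4106/rfc4543): the associated data must be at
 * least 8 bytes, otherwise the request is rejected before it reaches
 * gcm_encrypt().
 */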
Herbert Xu46218752015-07-09 07:17:33 +08002694static int ipsec_gcm_encrypt(struct aead_request *req)
2695{
2696 if (req->assoclen < 8)
2697 return -EINVAL;
2698
2699 return gcm_encrypt(req);
2700}
2701
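/*
 * Encrypt entry point for algorithms still served through the legacy
 * AEAD interface: allocate the extended descriptor, build the job
 * descriptor from the encrypt shared descriptor and enqueue it.
 */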
Herbert Xuf2147b82015-06-16 13:54:23 +08002702static int old_aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002703{
Yuan Kang1acebad2011-07-15 11:21:42 +08002704 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002705 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08002706 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2707 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002708 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08002709 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002710 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002711
2712 /* allocate extended descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002713 edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2714 CAAM_CMD_SZ, &all_contig, true);
Yuan Kang0e479302011-07-15 11:21:41 +08002715 if (IS_ERR(edesc))
2716 return PTR_ERR(edesc);
2717
Herbert Xuf2147b82015-06-16 13:54:23 +08002718 /* Create and submit job descriptor */
2719 old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
2720 all_contig, true);
Yuan Kang1acebad2011-07-15 11:21:42 +08002721#ifdef DEBUG
Herbert Xuf2147b82015-06-16 13:54:23 +08002722 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2723 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2724 desc_bytes(edesc->hw_desc), 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002725#endif
2726
Herbert Xuf2147b82015-06-16 13:54:23 +08002727 desc = edesc->hw_desc;
2728 ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
2729 if (!ret) {
2730 ret = -EINPROGRESS;
2731 } else {
2732 old_aead_unmap(jrdev, edesc, req);
2733 kfree(edesc);
2734 }
2735
2736 return ret;
2737}
2738
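/*
 * GCM decrypt: same flow as gcm_encrypt(), but the job descriptor is
 * built for decryption and completes through aead_decrypt_done().
 */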
2739static int gcm_decrypt(struct aead_request *req)
2740{
2741 struct aead_edesc *edesc;
2742 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2743 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2744 struct device *jrdev = ctx->jrdev;
2745 bool all_contig;
2746 u32 *desc;
2747 int ret = 0;
2748
2749 /* allocate extended descriptor */
2750 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2751 if (IS_ERR(edesc))
2752 return PTR_ERR(edesc);
2753
Yuan Kang1acebad2011-07-15 11:21:42 +08002754	/* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002755 init_gcm_job(req, edesc, all_contig, false);
Yuan Kang1acebad2011-07-15 11:21:42 +08002756#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002757 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002758 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2759 desc_bytes(edesc->hw_desc), 1);
2760#endif
2761
Yuan Kang0e479302011-07-15 11:21:41 +08002762 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002763 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2764 if (!ret) {
2765 ret = -EINPROGRESS;
2766 } else {
2767 aead_unmap(jrdev, edesc, req);
2768 kfree(edesc);
2769 }
Yuan Kang0e479302011-07-15 11:21:41 +08002770
Yuan Kang1acebad2011-07-15 11:21:42 +08002771 return ret;
2772}
Yuan Kang0e479302011-07-15 11:21:41 +08002773
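/* rfc4106/rfc4543 decrypt wrapper: enforce the 8-byte minimum assoclen */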
Herbert Xu46218752015-07-09 07:17:33 +08002774static int ipsec_gcm_decrypt(struct aead_request *req)
2775{
2776 if (req->assoclen < 8)
2777 return -EINVAL;
2778
2779 return gcm_decrypt(req);
2780}
2781
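/*
 * Legacy AEAD decrypt entry point: same flow as old_aead_encrypt(),
 * but using the decrypt shared descriptor and completion callback.
 */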
Herbert Xuf2147b82015-06-16 13:54:23 +08002782static int old_aead_decrypt(struct aead_request *req)
2783{
2784 struct aead_edesc *edesc;
2785 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2786 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2787 struct device *jrdev = ctx->jrdev;
2788 bool all_contig;
2789 u32 *desc;
2790 int ret = 0;
2791
2792 /* allocate extended descriptor */
2793 edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2794 CAAM_CMD_SZ, &all_contig, false);
2795 if (IS_ERR(edesc))
2796 return PTR_ERR(edesc);
2797
2798#ifdef DEBUG
2799 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2800 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2801 req->cryptlen, 1);
2802#endif
2803
2804	/* Create and submit job descriptor */
2805 old_init_aead_job(ctx->sh_desc_dec,
2806 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
2807#ifdef DEBUG
2808 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2809 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2810 desc_bytes(edesc->hw_desc), 1);
2811#endif
2812
2813 desc = edesc->hw_desc;
2814 ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
2815 if (!ret) {
2816 ret = -EINPROGRESS;
2817 } else {
2818 old_aead_unmap(jrdev, edesc, req);
2819 kfree(edesc);
2820 }
2821
2822 return ret;
2823}
2824
Yuan Kang1acebad2011-07-15 11:21:42 +08002825/*
2826 * allocate and map the aead extended descriptor for aead givencrypt
2827 */
2828static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2829 *greq, int desc_bytes,
2830 u32 *contig_ptr)
2831{
2832 struct aead_request *req = &greq->areq;
2833 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2834 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2835 struct device *jrdev = ctx->jrdev;
2836 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2837 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2838 int assoc_nents, src_nents, dst_nents = 0;
2839 struct aead_edesc *edesc;
2840 dma_addr_t iv_dma = 0;
2841 int sgc;
2842 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2843 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05002844 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002845 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002846 bool is_gcm = false;
Yuan Kang0e479302011-07-15 11:21:41 +08002847
Yuan Kang643b39b2012-06-22 19:48:49 -05002848 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2849 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08002850
Yuan Kang1acebad2011-07-15 11:21:42 +08002851 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02002852 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2853 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002854
Yuan Kang643b39b2012-06-22 19:48:49 -05002855 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002856 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002857 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002858 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2859 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002860 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002861 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2862 DMA_TO_DEVICE, src_chained);
2863 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2864 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002865 }
2866
Yuan Kang1acebad2011-07-15 11:21:42 +08002867 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002868 if (dma_mapping_error(jrdev, iv_dma)) {
2869 dev_err(jrdev, "unable to map IV\n");
2870 return ERR_PTR(-ENOMEM);
2871 }
2872
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002873 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2874 OP_ALG_ALGSEL_AES) &&
2875 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2876 is_gcm = true;
2877
2878 /*
2879 * Check if data are contiguous.
2880 * GCM expected input sequence: IV, AAD, text
2881	 * All other modes - expected input sequence: AAD, IV, text
2882 */
2883
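	/*
	 * A cleared GIV_SRC_CONTIG/GIV_DST_CONTIG bit means the pieces do
	 * not sit back-to-back in that order, so the corresponding side is
	 * described through a sec4 S/G table further below.
	 */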
2884 if (is_gcm) {
2885 if (assoc_nents || iv_dma + ivsize !=
2886 sg_dma_address(req->assoc) || src_nents ||
2887 sg_dma_address(req->assoc) + req->assoclen !=
2888 sg_dma_address(req->src))
2889 contig &= ~GIV_SRC_CONTIG;
2890 } else {
2891 if (assoc_nents ||
2892 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2893 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2894 contig &= ~GIV_SRC_CONTIG;
2895 }
2896
Yuan Kang1acebad2011-07-15 11:21:42 +08002897 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2898 contig &= ~GIV_DST_CONTIG;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002899
Yuan Kang1acebad2011-07-15 11:21:42 +08002900 if (!(contig & GIV_SRC_CONTIG)) {
2901 assoc_nents = assoc_nents ? : 1;
2902 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002903 sec4_sg_len += assoc_nents + 1 + src_nents;
Tudor Ambarus19167bf2014-10-24 18:13:37 +03002904 if (req->src == req->dst &&
2905 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
Yuan Kang1acebad2011-07-15 11:21:42 +08002906 contig &= ~GIV_DST_CONTIG;
2907 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002908
2909 /*
2910 * Add new sg entries for GCM output sequence.
2911 * Expected output sequence: IV, encrypted text.
2912 */
2913 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2914 sec4_sg_len += 1 + src_nents;
2915
2916 if (unlikely(req->src != req->dst)) {
2917 dst_nents = dst_nents ? : 1;
2918 sec4_sg_len += 1 + dst_nents;
2919 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002920
Yuan Kanga299c832012-06-22 19:48:46 -05002921 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002922
2923 /* allocate space for base edesc and hw desc commands, link tables */
2924 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002925 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08002926 if (!edesc) {
2927 dev_err(jrdev, "could not allocate extended descriptor\n");
2928 return ERR_PTR(-ENOMEM);
2929 }
2930
2931 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002932 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002933 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002934 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002935 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002936 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002937 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002938 edesc->sec4_sg_bytes = sec4_sg_bytes;
2939 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2940 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002941 *contig_ptr = contig;
2942
Yuan Kanga299c832012-06-22 19:48:46 -05002943 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002944 if (!(contig & GIV_SRC_CONTIG)) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002945 if (!is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002946 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2947 edesc->sec4_sg + sec4_sg_index);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002948 sec4_sg_index += assoc_nents;
2949 }
2950
Yuan Kanga299c832012-06-22 19:48:46 -05002951 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002952 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002953 sec4_sg_index += 1;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002954
2955 if (is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002956 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2957 edesc->sec4_sg + sec4_sg_index);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002958 sec4_sg_index += assoc_nents;
2959 }
2960
Yuan Kanga299c832012-06-22 19:48:46 -05002961 sg_to_sec4_sg_last(req->src, src_nents,
2962 edesc->sec4_sg +
2963 sec4_sg_index, 0);
2964 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002965 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002966
2967 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2968 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2969 iv_dma, ivsize, 0);
2970 sec4_sg_index += 1;
2971 sg_to_sec4_sg_last(req->src, src_nents,
2972 edesc->sec4_sg + sec4_sg_index, 0);
2973 }
2974
Yuan Kang1acebad2011-07-15 11:21:42 +08002975 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05002976 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002977 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002978 sec4_sg_index += 1;
2979 sg_to_sec4_sg_last(req->dst, dst_nents,
2980 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002981 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302982 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2983 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002984 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2985 dev_err(jrdev, "unable to map S/G table\n");
2986 return ERR_PTR(-ENOMEM);
2987 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002988
2989 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002990}
2991
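/*
 * Legacy givencrypt entry point: allocate the extended descriptor
 * (including the IV buffer from the givcrypt request), build the job
 * descriptor from the givencrypt shared descriptor and enqueue it.
 */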
Herbert Xuf2147b82015-06-16 13:54:23 +08002992static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
Yuan Kang0e479302011-07-15 11:21:41 +08002993{
2994 struct aead_request *req = &areq->areq;
2995 struct aead_edesc *edesc;
2996 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002997 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2998 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002999 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003000 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08003001 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003002
Kim Phillips8e8ec592011-03-13 16:54:26 +08003003 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08003004 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
3005 CAAM_CMD_SZ, &contig);
3006
Kim Phillips8e8ec592011-03-13 16:54:26 +08003007 if (IS_ERR(edesc))
3008 return PTR_ERR(edesc);
3009
Yuan Kang1acebad2011-07-15 11:21:42 +08003010#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003011 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08003012 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
3013 req->cryptlen, 1);
3014#endif
3015
3016	/* Create and submit job descriptor */
3017 init_aead_giv_job(ctx->sh_desc_givenc,
3018 ctx->sh_desc_givenc_dma, edesc, req, contig);
3019#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003020 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08003021 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3022 desc_bytes(edesc->hw_desc), 1);
3023#endif
3024
Kim Phillips8e8ec592011-03-13 16:54:26 +08003025 desc = edesc->hw_desc;
Herbert Xuf2147b82015-06-16 13:54:23 +08003026 ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
Yuan Kang1acebad2011-07-15 11:21:42 +08003027 if (!ret) {
3028 ret = -EINPROGRESS;
3029 } else {
Herbert Xuf2147b82015-06-16 13:54:23 +08003030 old_aead_unmap(jrdev, edesc, req);
Yuan Kang1acebad2011-07-15 11:21:42 +08003031 kfree(edesc);
3032 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003033
Yuan Kang1acebad2011-07-15 11:21:42 +08003034 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003035}
3036
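/* The null cipher has no IV to generate - a plain encrypt is sufficient */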
Horia Geantaae4a8252014-03-14 17:46:52 +02003037static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
3038{
Herbert Xuf2147b82015-06-16 13:54:23 +08003039 return old_aead_encrypt(&areq->areq);
Horia Geantaae4a8252014-03-14 17:46:52 +02003040}
3041
Yuan Kangacdca312011-07-15 11:21:42 +08003042/*
3043 * allocate and map the ablkcipher extended descriptor for ablkcipher
3044 */
3045static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
3046 *req, int desc_bytes,
3047 bool *iv_contig_out)
3048{
3049 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3050 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3051 struct device *jrdev = ctx->jrdev;
3052 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3053 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3054 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05003055 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08003056 struct ablkcipher_edesc *edesc;
3057 dma_addr_t iv_dma = 0;
3058 bool iv_contig = false;
3059 int sgc;
3060 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05003061 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05003062 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08003063
Yuan Kang643b39b2012-06-22 19:48:49 -05003064 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003065
Yuan Kang643b39b2012-06-22 19:48:49 -05003066 if (req->dst != req->src)
3067 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003068
3069 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05003070 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3071 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003072 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05003073 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3074 DMA_TO_DEVICE, src_chained);
3075 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3076 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003077 }
3078
Horia Geantace572082014-07-11 15:34:49 +03003079 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
3080 if (dma_mapping_error(jrdev, iv_dma)) {
3081 dev_err(jrdev, "unable to map IV\n");
3082 return ERR_PTR(-ENOMEM);
3083 }
3084
Yuan Kangacdca312011-07-15 11:21:42 +08003085 /*
3086 * Check if iv can be contiguous with source and destination.
3087 * If so, include it. If not, create scatterlist.
3088 */
Yuan Kangacdca312011-07-15 11:21:42 +08003089 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
3090 iv_contig = true;
3091 else
3092 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05003093 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3094 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08003095
3096 /* allocate space for base edesc and hw desc commands, link tables */
3097 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05003098 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08003099 if (!edesc) {
3100 dev_err(jrdev, "could not allocate extended descriptor\n");
3101 return ERR_PTR(-ENOMEM);
3102 }
3103
3104 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05003105 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08003106 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05003107 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05003108 edesc->sec4_sg_bytes = sec4_sg_bytes;
3109 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3110 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08003111
Yuan Kanga299c832012-06-22 19:48:46 -05003112 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08003113 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05003114 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
3115 sg_to_sec4_sg_last(req->src, src_nents,
3116 edesc->sec4_sg + 1, 0);
3117 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08003118 }
3119
Yuan Kang643b39b2012-06-22 19:48:49 -05003120 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05003121 sg_to_sec4_sg_last(req->dst, dst_nents,
3122 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08003123 }
3124
Yuan Kanga299c832012-06-22 19:48:46 -05003125 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3126 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03003127 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3128 dev_err(jrdev, "unable to map S/G table\n");
3129 return ERR_PTR(-ENOMEM);
3130 }
3131
Yuan Kangacdca312011-07-15 11:21:42 +08003132 edesc->iv_dma = iv_dma;
3133
3134#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003135 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05003136 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3137 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08003138#endif
3139
3140 *iv_contig_out = iv_contig;
3141 return edesc;
3142}
3143
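/*
 * ablkcipher encrypt: allocate and map the extended descriptor, build
 * the job descriptor from the encrypt shared descriptor and enqueue it.
 */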
3144static int ablkcipher_encrypt(struct ablkcipher_request *req)
3145{
3146 struct ablkcipher_edesc *edesc;
3147 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3148 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3149 struct device *jrdev = ctx->jrdev;
3150 bool iv_contig;
3151 u32 *desc;
3152 int ret = 0;
3153
3154 /* allocate extended descriptor */
3155 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3156 CAAM_CMD_SZ, &iv_contig);
3157 if (IS_ERR(edesc))
3158 return PTR_ERR(edesc);
3159
3160	/* Create and submit job descriptor */
3161 init_ablkcipher_job(ctx->sh_desc_enc,
3162 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
3163#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003164 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003165 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3166 desc_bytes(edesc->hw_desc), 1);
3167#endif
3168 desc = edesc->hw_desc;
3169 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3170
3171 if (!ret) {
3172 ret = -EINPROGRESS;
3173 } else {
3174 ablkcipher_unmap(jrdev, edesc, req);
3175 kfree(edesc);
3176 }
3177
3178 return ret;
3179}
3180
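/* ablkcipher decrypt: as above, but based on the decrypt shared descriptor */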
3181static int ablkcipher_decrypt(struct ablkcipher_request *req)
3182{
3183 struct ablkcipher_edesc *edesc;
3184 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3185 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3186 struct device *jrdev = ctx->jrdev;
3187 bool iv_contig;
3188 u32 *desc;
3189 int ret = 0;
3190
3191 /* allocate extended descriptor */
3192 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3193 CAAM_CMD_SZ, &iv_contig);
3194 if (IS_ERR(edesc))
3195 return PTR_ERR(edesc);
3196
3197	/* Create and submit job descriptor */
3198 init_ablkcipher_job(ctx->sh_desc_dec,
3199 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
3200 desc = edesc->hw_desc;
3201#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003202 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003203 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3204 desc_bytes(edesc->hw_desc), 1);
3205#endif
3206
3207 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
3208 if (!ret) {
3209 ret = -EINPROGRESS;
3210 } else {
3211 ablkcipher_unmap(jrdev, edesc, req);
3212 kfree(edesc);
3213 }
3214
3215 return ret;
3216}
3217
Catalin Vasile7222d1a2014-10-31 12:45:38 +02003218/*
3219 * allocate and map the ablkcipher extended descriptor
3220 * for ablkcipher givencrypt
3221 */
3222static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
3223 struct skcipher_givcrypt_request *greq,
3224 int desc_bytes,
3225 bool *iv_contig_out)
3226{
3227 struct ablkcipher_request *req = &greq->creq;
3228 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3229 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3230 struct device *jrdev = ctx->jrdev;
3231 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3232 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3233 GFP_KERNEL : GFP_ATOMIC;
3234 int src_nents, dst_nents = 0, sec4_sg_bytes;
3235 struct ablkcipher_edesc *edesc;
3236 dma_addr_t iv_dma = 0;
3237 bool iv_contig = false;
3238 int sgc;
3239 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3240 bool src_chained = false, dst_chained = false;
3241 int sec4_sg_index;
3242
3243 src_nents = sg_count(req->src, req->nbytes, &src_chained);
3244
3245 if (unlikely(req->dst != req->src))
3246 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
3247
3248 if (likely(req->src == req->dst)) {
3249 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3250 DMA_BIDIRECTIONAL, src_chained);
3251 } else {
3252 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3253 DMA_TO_DEVICE, src_chained);
3254 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3255 DMA_FROM_DEVICE, dst_chained);
3256 }
3257
3258 /*
3259 * Check if iv can be contiguous with source and destination.
3260 * If so, include it. If not, create scatterlist.
3261 */
3262 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
3263 if (dma_mapping_error(jrdev, iv_dma)) {
3264 dev_err(jrdev, "unable to map IV\n");
3265 return ERR_PTR(-ENOMEM);
3266 }
3267
3268 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
3269 iv_contig = true;
3270 else
3271 dst_nents = dst_nents ? : 1;
3272 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3273 sizeof(struct sec4_sg_entry);
3274
3275 /* allocate space for base edesc and hw desc commands, link tables */
3276 edesc = kmalloc(sizeof(*edesc) + desc_bytes +
3277 sec4_sg_bytes, GFP_DMA | flags);
3278 if (!edesc) {
3279 dev_err(jrdev, "could not allocate extended descriptor\n");
3280 return ERR_PTR(-ENOMEM);
3281 }
3282
3283 edesc->src_nents = src_nents;
3284 edesc->src_chained = src_chained;
3285 edesc->dst_nents = dst_nents;
3286 edesc->dst_chained = dst_chained;
3287 edesc->sec4_sg_bytes = sec4_sg_bytes;
3288 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3289 desc_bytes;
3290
3291 sec4_sg_index = 0;
3292 if (src_nents) {
3293 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
3294 sec4_sg_index += src_nents;
3295 }
3296
3297 if (!iv_contig) {
3298 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
3299 iv_dma, ivsize, 0);
3300 sec4_sg_index += 1;
3301 sg_to_sec4_sg_last(req->dst, dst_nents,
3302 edesc->sec4_sg + sec4_sg_index, 0);
3303 }
3304
3305 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3306 sec4_sg_bytes, DMA_TO_DEVICE);
3307 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3308 dev_err(jrdev, "unable to map S/G table\n");
3309 return ERR_PTR(-ENOMEM);
3310 }
3311 edesc->iv_dma = iv_dma;
3312
3313#ifdef DEBUG
3314 print_hex_dump(KERN_ERR,
3315 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
3316 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3317 sec4_sg_bytes, 1);
3318#endif
3319
3320 *iv_contig_out = iv_contig;
3321 return edesc;
3322}
3323
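/*
 * ablkcipher givencrypt: like ablkcipher_encrypt(), but built on the
 * givencrypt shared descriptor and the IV buffer supplied in the
 * skcipher_givcrypt_request.
 */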
3324static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
3325{
3326 struct ablkcipher_request *req = &creq->creq;
3327 struct ablkcipher_edesc *edesc;
3328 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3329 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3330 struct device *jrdev = ctx->jrdev;
3331 bool iv_contig;
3332 u32 *desc;
3333 int ret = 0;
3334
3335 /* allocate extended descriptor */
3336 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
3337 CAAM_CMD_SZ, &iv_contig);
3338 if (IS_ERR(edesc))
3339 return PTR_ERR(edesc);
3340
3341	/* Create and submit job descriptor */
3342 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
3343 edesc, req, iv_contig);
3344#ifdef DEBUG
3345 print_hex_dump(KERN_ERR,
3346 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
3347 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3348 desc_bytes(edesc->hw_desc), 1);
3349#endif
3350 desc = edesc->hw_desc;
3351 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3352
3353 if (!ret) {
3354 ret = -EINPROGRESS;
3355 } else {
3356 ablkcipher_unmap(jrdev, edesc, req);
3357 kfree(edesc);
3358 }
3359
3360 return ret;
3361}
3362
Yuan Kang885e9e22011-07-15 11:21:41 +08003363#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08003364#define template_ablkcipher template_u.ablkcipher
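/*
 * Template describing one algorithm exposed through the legacy
 * crypto_alg path: generic/driver names, block size, the
 * ablkcipher/old-style aead callbacks, and the CAAM class 1/class 2
 * algorithm and operation settings used when building its descriptors.
 */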
Kim Phillips8e8ec592011-03-13 16:54:26 +08003365struct caam_alg_template {
3366 char name[CRYPTO_MAX_ALG_NAME];
3367 char driver_name[CRYPTO_MAX_ALG_NAME];
3368 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08003369 u32 type;
3370 union {
3371 struct ablkcipher_alg ablkcipher;
Herbert Xuae13ed442015-05-21 15:11:03 +08003372 struct old_aead_alg aead;
Yuan Kang885e9e22011-07-15 11:21:41 +08003373 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003374 u32 class1_alg_type;
3375 u32 class2_alg_type;
3376 u32 alg_op;
3377};
3378
3379static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02003380 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08003381 {
Horia Geantaae4a8252014-03-14 17:46:52 +02003382 .name = "authenc(hmac(md5),ecb(cipher_null))",
3383 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
3384 .blocksize = NULL_BLOCK_SIZE,
3385 .type = CRYPTO_ALG_TYPE_AEAD,
3386 .template_aead = {
3387 .setkey = aead_setkey,
3388 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003389 .encrypt = old_aead_encrypt,
3390 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003391 .givencrypt = aead_null_givencrypt,
3392 .geniv = "<built-in>",
3393 .ivsize = NULL_IV_SIZE,
3394 .maxauthsize = MD5_DIGEST_SIZE,
3395 },
3396 .class1_alg_type = 0,
3397 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3398 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3399 },
3400 {
3401 .name = "authenc(hmac(sha1),ecb(cipher_null))",
3402 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
3403 .blocksize = NULL_BLOCK_SIZE,
3404 .type = CRYPTO_ALG_TYPE_AEAD,
3405 .template_aead = {
3406 .setkey = aead_setkey,
3407 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003408 .encrypt = old_aead_encrypt,
3409 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003410 .givencrypt = aead_null_givencrypt,
3411 .geniv = "<built-in>",
3412 .ivsize = NULL_IV_SIZE,
3413 .maxauthsize = SHA1_DIGEST_SIZE,
3414 },
3415 .class1_alg_type = 0,
3416 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3417 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3418 },
3419 {
3420 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3421 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3422 .blocksize = NULL_BLOCK_SIZE,
3423 .type = CRYPTO_ALG_TYPE_AEAD,
3424 .template_aead = {
3425 .setkey = aead_setkey,
3426 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003427 .encrypt = old_aead_encrypt,
3428 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003429 .givencrypt = aead_null_givencrypt,
3430 .geniv = "<built-in>",
3431 .ivsize = NULL_IV_SIZE,
3432 .maxauthsize = SHA224_DIGEST_SIZE,
3433 },
3434 .class1_alg_type = 0,
3435 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3436 OP_ALG_AAI_HMAC_PRECOMP,
3437 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3438 },
3439 {
3440 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3441 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3442 .blocksize = NULL_BLOCK_SIZE,
3443 .type = CRYPTO_ALG_TYPE_AEAD,
3444 .template_aead = {
3445 .setkey = aead_setkey,
3446 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003447 .encrypt = old_aead_encrypt,
3448 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003449 .givencrypt = aead_null_givencrypt,
3450 .geniv = "<built-in>",
3451 .ivsize = NULL_IV_SIZE,
3452 .maxauthsize = SHA256_DIGEST_SIZE,
3453 },
3454 .class1_alg_type = 0,
3455 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3456 OP_ALG_AAI_HMAC_PRECOMP,
3457 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3458 },
3459 {
3460 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3461 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3462 .blocksize = NULL_BLOCK_SIZE,
3463 .type = CRYPTO_ALG_TYPE_AEAD,
3464 .template_aead = {
3465 .setkey = aead_setkey,
3466 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003467 .encrypt = old_aead_encrypt,
3468 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003469 .givencrypt = aead_null_givencrypt,
3470 .geniv = "<built-in>",
3471 .ivsize = NULL_IV_SIZE,
3472 .maxauthsize = SHA384_DIGEST_SIZE,
3473 },
3474 .class1_alg_type = 0,
3475 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3476 OP_ALG_AAI_HMAC_PRECOMP,
3477 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3478 },
3479 {
3480 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3481 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3482 .blocksize = NULL_BLOCK_SIZE,
3483 .type = CRYPTO_ALG_TYPE_AEAD,
3484 .template_aead = {
3485 .setkey = aead_setkey,
3486 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003487 .encrypt = old_aead_encrypt,
3488 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003489 .givencrypt = aead_null_givencrypt,
3490 .geniv = "<built-in>",
3491 .ivsize = NULL_IV_SIZE,
3492 .maxauthsize = SHA512_DIGEST_SIZE,
3493 },
3494 .class1_alg_type = 0,
3495 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3496 OP_ALG_AAI_HMAC_PRECOMP,
3497 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3498 },
3499 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003500 .name = "authenc(hmac(md5),cbc(aes))",
3501 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3502 .blocksize = AES_BLOCK_SIZE,
3503 .type = CRYPTO_ALG_TYPE_AEAD,
3504 .template_aead = {
3505 .setkey = aead_setkey,
3506 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003507 .encrypt = old_aead_encrypt,
3508 .decrypt = old_aead_decrypt,
3509 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003510 .geniv = "<built-in>",
3511 .ivsize = AES_BLOCK_SIZE,
3512 .maxauthsize = MD5_DIGEST_SIZE,
3513 },
3514 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3515 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3516 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3517 },
3518 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003519 .name = "authenc(hmac(sha1),cbc(aes))",
3520 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3521 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003522 .type = CRYPTO_ALG_TYPE_AEAD,
3523 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003524 .setkey = aead_setkey,
3525 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003526 .encrypt = old_aead_encrypt,
3527 .decrypt = old_aead_decrypt,
3528 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003529 .geniv = "<built-in>",
3530 .ivsize = AES_BLOCK_SIZE,
3531 .maxauthsize = SHA1_DIGEST_SIZE,
3532 },
3533 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3534 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3535 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3536 },
3537 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003538 .name = "authenc(hmac(sha224),cbc(aes))",
3539 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3540 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303541 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003542 .template_aead = {
3543 .setkey = aead_setkey,
3544 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003545 .encrypt = old_aead_encrypt,
3546 .decrypt = old_aead_decrypt,
3547 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003548 .geniv = "<built-in>",
3549 .ivsize = AES_BLOCK_SIZE,
3550 .maxauthsize = SHA224_DIGEST_SIZE,
3551 },
3552 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3553 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3554 OP_ALG_AAI_HMAC_PRECOMP,
3555 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3556 },
3557 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003558 .name = "authenc(hmac(sha256),cbc(aes))",
3559 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3560 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003561 .type = CRYPTO_ALG_TYPE_AEAD,
3562 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003563 .setkey = aead_setkey,
3564 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003565 .encrypt = old_aead_encrypt,
3566 .decrypt = old_aead_decrypt,
3567 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003568 .geniv = "<built-in>",
3569 .ivsize = AES_BLOCK_SIZE,
3570 .maxauthsize = SHA256_DIGEST_SIZE,
3571 },
3572 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3573 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3574 OP_ALG_AAI_HMAC_PRECOMP,
3575 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3576 },
3577 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003578 .name = "authenc(hmac(sha384),cbc(aes))",
3579 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3580 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303581 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003582 .template_aead = {
3583 .setkey = aead_setkey,
3584 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003585 .encrypt = old_aead_encrypt,
3586 .decrypt = old_aead_decrypt,
3587 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003588 .geniv = "<built-in>",
3589 .ivsize = AES_BLOCK_SIZE,
3590 .maxauthsize = SHA384_DIGEST_SIZE,
3591 },
3592 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3593 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3594 OP_ALG_AAI_HMAC_PRECOMP,
3595 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3596 },
3597
3598 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003599 .name = "authenc(hmac(sha512),cbc(aes))",
3600 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3601 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003602 .type = CRYPTO_ALG_TYPE_AEAD,
3603 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003604 .setkey = aead_setkey,
3605 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003606 .encrypt = old_aead_encrypt,
3607 .decrypt = old_aead_decrypt,
3608 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003609 .geniv = "<built-in>",
3610 .ivsize = AES_BLOCK_SIZE,
3611 .maxauthsize = SHA512_DIGEST_SIZE,
3612 },
3613 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3614 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3615 OP_ALG_AAI_HMAC_PRECOMP,
3616 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3617 },
3618 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003619 .name = "authenc(hmac(md5),cbc(des3_ede))",
3620 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3621 .blocksize = DES3_EDE_BLOCK_SIZE,
3622 .type = CRYPTO_ALG_TYPE_AEAD,
3623 .template_aead = {
3624 .setkey = aead_setkey,
3625 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003626 .encrypt = old_aead_encrypt,
3627 .decrypt = old_aead_decrypt,
3628 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003629 .geniv = "<built-in>",
3630 .ivsize = DES3_EDE_BLOCK_SIZE,
3631 .maxauthsize = MD5_DIGEST_SIZE,
3632 },
3633 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3634 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3635 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3636 },
3637 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003638 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3639 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3640 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003641 .type = CRYPTO_ALG_TYPE_AEAD,
3642 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003643 .setkey = aead_setkey,
3644 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003645 .encrypt = old_aead_encrypt,
3646 .decrypt = old_aead_decrypt,
3647 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003648 .geniv = "<built-in>",
3649 .ivsize = DES3_EDE_BLOCK_SIZE,
3650 .maxauthsize = SHA1_DIGEST_SIZE,
3651 },
3652 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3653 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3654 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3655 },
3656 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003657 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3658 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3659 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303660 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003661 .template_aead = {
3662 .setkey = aead_setkey,
3663 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003664 .encrypt = old_aead_encrypt,
3665 .decrypt = old_aead_decrypt,
3666 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003667 .geniv = "<built-in>",
3668 .ivsize = DES3_EDE_BLOCK_SIZE,
3669 .maxauthsize = SHA224_DIGEST_SIZE,
3670 },
3671 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3672 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3673 OP_ALG_AAI_HMAC_PRECOMP,
3674 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3675 },
3676 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003677 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3678 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3679 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003680 .type = CRYPTO_ALG_TYPE_AEAD,
3681 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003682 .setkey = aead_setkey,
3683 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003684 .encrypt = old_aead_encrypt,
3685 .decrypt = old_aead_decrypt,
3686 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003687 .geniv = "<built-in>",
3688 .ivsize = DES3_EDE_BLOCK_SIZE,
3689 .maxauthsize = SHA256_DIGEST_SIZE,
3690 },
3691 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3692 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3693 OP_ALG_AAI_HMAC_PRECOMP,
3694 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3695 },
3696 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003697 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3698 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3699 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303700 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003701 .template_aead = {
3702 .setkey = aead_setkey,
3703 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003704 .encrypt = old_aead_encrypt,
3705 .decrypt = old_aead_decrypt,
3706 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003707 .geniv = "<built-in>",
3708 .ivsize = DES3_EDE_BLOCK_SIZE,
3709 .maxauthsize = SHA384_DIGEST_SIZE,
3710 },
3711 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3712 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3713 OP_ALG_AAI_HMAC_PRECOMP,
3714 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3715 },
3716 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003717 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3718 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3719 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003720 .type = CRYPTO_ALG_TYPE_AEAD,
3721 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003722 .setkey = aead_setkey,
3723 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003724 .encrypt = old_aead_encrypt,
3725 .decrypt = old_aead_decrypt,
3726 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003727 .geniv = "<built-in>",
3728 .ivsize = DES3_EDE_BLOCK_SIZE,
3729 .maxauthsize = SHA512_DIGEST_SIZE,
3730 },
3731 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3732 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3733 OP_ALG_AAI_HMAC_PRECOMP,
3734 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3735 },
3736 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003737 .name = "authenc(hmac(md5),cbc(des))",
3738 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3739 .blocksize = DES_BLOCK_SIZE,
3740 .type = CRYPTO_ALG_TYPE_AEAD,
3741 .template_aead = {
3742 .setkey = aead_setkey,
3743 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003744 .encrypt = old_aead_encrypt,
3745 .decrypt = old_aead_decrypt,
3746 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003747 .geniv = "<built-in>",
3748 .ivsize = DES_BLOCK_SIZE,
3749 .maxauthsize = MD5_DIGEST_SIZE,
3750 },
3751 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3752 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3753 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3754 },
3755 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003756 .name = "authenc(hmac(sha1),cbc(des))",
3757 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3758 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003759 .type = CRYPTO_ALG_TYPE_AEAD,
3760 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003761 .setkey = aead_setkey,
3762 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003763 .encrypt = old_aead_encrypt,
3764 .decrypt = old_aead_decrypt,
3765 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003766 .geniv = "<built-in>",
3767 .ivsize = DES_BLOCK_SIZE,
3768 .maxauthsize = SHA1_DIGEST_SIZE,
3769 },
3770 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3771 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3772 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3773 },
3774 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003775 .name = "authenc(hmac(sha224),cbc(des))",
3776 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3777 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303778 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003779 .template_aead = {
3780 .setkey = aead_setkey,
3781 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003782 .encrypt = old_aead_encrypt,
3783 .decrypt = old_aead_decrypt,
3784 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003785 .geniv = "<built-in>",
3786 .ivsize = DES_BLOCK_SIZE,
3787 .maxauthsize = SHA224_DIGEST_SIZE,
3788 },
3789 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3790 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3791 OP_ALG_AAI_HMAC_PRECOMP,
3792 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3793 },
3794 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003795 .name = "authenc(hmac(sha256),cbc(des))",
3796 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3797 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003798 .type = CRYPTO_ALG_TYPE_AEAD,
3799 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003800 .setkey = aead_setkey,
3801 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003802 .encrypt = old_aead_encrypt,
3803 .decrypt = old_aead_decrypt,
3804 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003805 .geniv = "<built-in>",
3806 .ivsize = DES_BLOCK_SIZE,
3807 .maxauthsize = SHA256_DIGEST_SIZE,
3808 },
3809 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3810 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3811 OP_ALG_AAI_HMAC_PRECOMP,
3812 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3813 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003814 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003815 .name = "authenc(hmac(sha384),cbc(des))",
3816 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3817 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303818 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003819 .template_aead = {
3820 .setkey = aead_setkey,
3821 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003822 .encrypt = old_aead_encrypt,
3823 .decrypt = old_aead_decrypt,
3824 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003825 .geniv = "<built-in>",
3826 .ivsize = DES_BLOCK_SIZE,
3827 .maxauthsize = SHA384_DIGEST_SIZE,
3828 },
3829 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3830 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3831 OP_ALG_AAI_HMAC_PRECOMP,
3832 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3833 },
3834 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003835 .name = "authenc(hmac(sha512),cbc(des))",
3836 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3837 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003838 .type = CRYPTO_ALG_TYPE_AEAD,
3839 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003840 .setkey = aead_setkey,
3841 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003842 .encrypt = old_aead_encrypt,
3843 .decrypt = old_aead_decrypt,
3844 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003845 .geniv = "<built-in>",
3846 .ivsize = DES_BLOCK_SIZE,
3847 .maxauthsize = SHA512_DIGEST_SIZE,
3848 },
3849 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3850 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3851 OP_ALG_AAI_HMAC_PRECOMP,
3852 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3853 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003854 {
Catalin Vasiledaebc462014-10-31 12:45:37 +02003855 .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
3856 .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
3857 .blocksize = 1,
3858 .type = CRYPTO_ALG_TYPE_AEAD,
3859 .template_aead = {
3860 .setkey = aead_setkey,
3861 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003862 .encrypt = old_aead_encrypt,
3863 .decrypt = old_aead_decrypt,
3864 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003865 .geniv = "<built-in>",
3866 .ivsize = CTR_RFC3686_IV_SIZE,
3867 .maxauthsize = MD5_DIGEST_SIZE,
3868 },
3869 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3870 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3871 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3872 },
3873 {
3874 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
3875 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
3876 .blocksize = 1,
3877 .type = CRYPTO_ALG_TYPE_AEAD,
3878 .template_aead = {
3879 .setkey = aead_setkey,
3880 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003881 .encrypt = old_aead_encrypt,
3882 .decrypt = old_aead_decrypt,
3883 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003884 .geniv = "<built-in>",
3885 .ivsize = CTR_RFC3686_IV_SIZE,
3886 .maxauthsize = SHA1_DIGEST_SIZE,
3887 },
3888 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3889 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3890 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3891 },
3892 {
3893 .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
3894 .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
3895 .blocksize = 1,
3896 .type = CRYPTO_ALG_TYPE_AEAD,
3897 .template_aead = {
3898 .setkey = aead_setkey,
3899 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003900 .encrypt = old_aead_encrypt,
3901 .decrypt = old_aead_decrypt,
3902 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003903 .geniv = "<built-in>",
3904 .ivsize = CTR_RFC3686_IV_SIZE,
3905 .maxauthsize = SHA224_DIGEST_SIZE,
3906 },
3907 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3908 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3909 OP_ALG_AAI_HMAC_PRECOMP,
3910 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3911 },
3912 {
3913 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
3914 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
3915 .blocksize = 1,
3916 .type = CRYPTO_ALG_TYPE_AEAD,
3917 .template_aead = {
3918 .setkey = aead_setkey,
3919 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003920 .encrypt = old_aead_encrypt,
3921 .decrypt = old_aead_decrypt,
3922 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003923 .geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = old_aead_encrypt,
			.decrypt = old_aead_decrypt,
			.givencrypt = old_aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = old_aead_encrypt,
			.decrypt = old_aead_decrypt,
			.givencrypt = old_aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	}
};

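/*
 * Illustrative only: once caam_algapi_init() has turned the templates above
 * into registered crypto_algs, a caller reaches them through the generic
 * crypto API, e.g.
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * where "cbc-aes-caam" is normally selected on the strength of
 * CAAM_CRA_PRIORITY.
 */

/*
 * Per-algorithm CAAM settings: CLASS1/CLASS2 algorithm selectors and the
 * split-key operation, copied into the transform context by
 * caam_init_common().
 */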
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

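/*
 * AEADs exposed through the new aead_alg interface (AES-GCM and its RFC4106
 * and RFC4543 IPsec wrappers); these are registered separately from
 * driver_algs, via crypto_register_aead().
 */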
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
};

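/*
 * Legacy wrapper tying a crypto_alg built from a caam_alg_template to its
 * CAAM settings; instances live on alg_list so caam_algapi_exit() can
 * unregister and free them.
 */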
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

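/*
 * Allocate a job ring for this transform and cache the descriptor header
 * template values derived from the per-algorithm CAAM settings.
 */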
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

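/*
 * Transform init hooks: caam_cra_init() serves the legacy crypto_alg path,
 * caam_aead_init() the new aead_alg path; both recover their per-algorithm
 * template and delegate to caam_init_common().
 */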
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

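/*
 * Undo caam_init_common() and any setkey-time work: unmap the shared
 * descriptors and the key material if they were DMA-mapped, then release
 * the job ring.
 */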
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

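/*
 * Module teardown: unregister whatever caam_algapi_init() registered,
 * new-interface AEADs first, then the legacy algorithms on alg_list.
 */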
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

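/*
 * Build a legacy crypto_alg from a caam_alg_template, selecting the cra_type
 * and per-type template by template->type.
 */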
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

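/* Fill in the base fields shared by all entries in driver_aeads. */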
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			      CRYPTO_ALG_AEAD_NEW;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

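/*
 * Module init: locate the CAAM controller in the device tree, make sure its
 * driver actually probed, then register the legacy template-based algorithms
 * followed by the new aead_alg-based ones. Registration failures are logged
 * and skipped rather than aborting the whole module load.
 */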
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");