/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->| ShareDesc   |
 * | *(packet 1) |                     | (PDB)       |
 * ---------------      |------------->| (hashKey)   |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
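
/*
 * Illustrative only (not part of this driver): using the desc_constr.h
 * helpers, a caller could assemble a job descriptor with the layout above
 * roughly as follows -- the exact flags are an assumption based on how
 * the job-ring code elsewhere in this driver uses them:
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 */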

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
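
/*
 * Sizing note (illustrative; the exact constants live in desc.h and
 * desc_constr.h): with CAAM_CMD_SZ = 4 bytes per command word and a
 * 64-word descriptor buffer, CAAM_DESC_BYTES_MAX works out to 256 bytes.
 * The keys_fit_inline checks below all follow the same arithmetic: keys
 * may be embedded in the shared descriptor as immediate data only while
 * descriptor text + job I/O commands + key bytes stay within that budget;
 * otherwise the keys are referenced by DMA address instead.
 */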

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
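
/*
 * Given only the crypto_aead handle, the per-algorithm template is
 * recovered via container_of(), as done in aead_set_sh_desc() below:
 *
 *	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 *						 struct caam_aead_alg, aead);
 */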

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
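
/*
 * The two jumps above give the descriptor a runtime if/else on the SHRD
 * (shared) condition, roughly:
 *
 *	if (descriptor is shared)		// first jump taken
 *		OPERATION(AES decrypt | AAI_DK);	// key held decrypted
 *	else
 *		OPERATION(AES decrypt);	// then jump over the DK variant
 */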

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
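
/*
 * In the helper above, REG0 serves as an always-zero operand (these
 * descriptors never write it), so both MATH adds effectively copy
 * SEQINLEN into the variable input and output lengths; the VLF FIFO
 * load/store that follow then move the entire remaining payload.
 */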

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
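
/*
 * Layout of ctx->key for the authenc algorithms (see append_key_aead()
 * below); for the GCM-family algorithms the buffer simply holds the raw
 * AES key:
 *
 *	+--------------------+----------------+----------------------+
 *	| MDHA split key     | encryption key | CTR nonce (4 bytes,  |
 *	| (padded out to     |                |  rfc3686 only,       |
 *	| split_key_pad_len) |                |  counted in          |
 *	|                    |                |  enckeylen)          |
 *	+--------------------+----------------+----------------------+
 */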

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
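	/*
	 * Example: for SHA-256 the MDHA pad size above is 32 bytes, so the
	 * split key (ipad- and opad-derived halves) takes 2 * 32 = 64
	 * bytes, and ALIGN(64, 16) below leaves split_key_pad_len at 64.
	 */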
1289 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1290 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001291 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001292 int ret = 0;
1293
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001294 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001295 goto badkey;
1296
1297 /* Pick class 2 key length from algorithm submask */
1298 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1299 OP_ALG_ALGSEL_SHIFT] * 2;
1300 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1301
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001302 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1303 goto badkey;
1304
Kim Phillips8e8ec592011-03-13 16:54:26 +08001305#ifdef DEBUG
1306 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001307 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1308 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001309 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1310 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001311 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001312 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1313#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001314
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001315 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001316 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001317 goto badkey;
1318 }
1319
1320 /* postpend encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001321 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001322
Yuan Kang885e9e22011-07-15 11:21:41 +08001323 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001324 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001325 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001326 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001327 return -ENOMEM;
1328 }
1329#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001330 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001331 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001332 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001333#endif
1334
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001335 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001336
Yuan Kang1acebad2011-07-15 11:21:42 +08001337 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001338 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001339 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001340 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001341 }
1342
1343 return ret;
1344badkey:
1345 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1346 return -EINVAL;
1347}
1348
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001349static int gcm_setkey(struct crypto_aead *aead,
1350 const u8 *key, unsigned int keylen)
1351{
1352 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1353 struct device *jrdev = ctx->jrdev;
1354 int ret = 0;
1355
1356#ifdef DEBUG
1357 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1358 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1359#endif
1360
1361 memcpy(ctx->key, key, keylen);
1362 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1363 DMA_TO_DEVICE);
1364 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1365 dev_err(jrdev, "unable to map key i/o memory\n");
1366 return -ENOMEM;
1367 }
1368 ctx->enckeylen = keylen;
1369
1370 ret = gcm_set_sh_desc(aead);
1371 if (ret) {
1372 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1373 DMA_TO_DEVICE);
1374 }
1375
1376 return ret;
1377}
1378
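/*
 * The RFC4106/RFC4543 setkey handlers below receive the key blob as
 * { AES key | 4-byte salt }: the salt stays at ctx->key + ctx->enckeylen
 * (copied, but deliberately excluded from the DMA mapped length, since
 * init_gcm_job() inlines it into the job descriptor as immediate data).
 */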
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

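/*
 * ablkcipher_setkey() builds three shared descriptors from a single key:
 * encrypt, decrypt and givencrypt (the latter has the CAAM RNG pad the IV).
 * For plain AES-CTR the IV sits in CONTEXT1 at a 16-byte offset; for
 * RFC3686 the layout is {NONCE, IV, COUNTER} and the nonce arrives
 * appended to the key material.
 */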
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

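/*
 * XTS expects the key blob to carry two concatenated AES keys (data key and
 * tweak key), hence the 2 * AES_{MIN,MAX}_KEY_SIZE check below.  The shared
 * descriptors hard-code a 512-byte sector size at class 1 context offset
 * 0x28 and take the sector index from the upper 8 bytes of the IV.
 */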
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
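/*
 * The input sequence covers assoclen + cryptlen bytes; the output sequence
 * reuses the same pointer for in-place requests and grows by authsize on
 * encrypt (ICV appended) or shrinks by authsize on decrypt (ICV consumed).
 * Scattered buffers are referenced through the sec4 link table by setting
 * LDST_SGF on the SEQ pointer commands.
 */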
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}

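/*
 * The CCB always receives 12 bytes of IV material through the immediate
 * FIFO LOAD below: the full 12-byte IV for generic GCM, or the 4-byte salt
 * (kept at ctx->key + ctx->enckeylen) followed by the 8-byte per-request
 * IV for the RFC4106/RFC4543 variants.
 */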
static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

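/*
 * For the ablkcipher paths the IV always travels with the input sequence:
 * init_ablkcipher_job() sizes SEQ IN as nbytes + ivsize, with the IV either
 * physically contiguous in front of the source buffer or supplied as the
 * first link table entry (see ablkcipher_edesc_alloc() further below).
 */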
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
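/*
 * Counts and DMA maps the source/destination segments (the output side
 * includes the ICV on encrypt), then carves the job descriptor, the edesc
 * bookkeeping and the sec4 link table out of a single GFP_DMA allocation.
 */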
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

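/*
 * A minimal caller-side sketch of how these entry points are reached via
 * the generic AEAD API (illustrative only, not part of this driver;
 * my_done_cb/my_ctx are placeholders, src_sg/dst_sg cover the AAD followed
 * by the payload, and error handling is elided):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	<-- lands in gcm_encrypt() below
 *
 * On this asynchronous driver crypto_aead_encrypt() normally returns
 * -EINPROGRESS and my_done_cb() runs once the job ring completes.
 */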
static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->assoclen + req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

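/*
 * For geniv algorithms the generated IV sits between the AAD and the
 * ciphertext, so decryption just reclassifies those ivsize bytes as
 * associated data and falls through to the normal decrypt path.
 */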
static int aead_givdecrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	req->cryptlen -= ivsize;
	req->assoclen += ivsize;

	return aead_decrypt(req);
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
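/*
 * Link table layout built below when the IV is not contiguous with the
 * source: entry 0 holds the IV, the source entries follow, and any
 * destination entries start at sec4_sg_index.  A contiguous IV in front of
 * a single flat source buffer bypasses the table entirely (iv_contig).
 */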
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		/* undo the scatterlist mappings done above */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

2651static int ablkcipher_encrypt(struct ablkcipher_request *req)
2652{
2653 struct ablkcipher_edesc *edesc;
2654 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2655 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2656 struct device *jrdev = ctx->jrdev;
2657 bool iv_contig;
2658 u32 *desc;
2659 int ret = 0;
2660
2661 /* allocate extended descriptor */
2662 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2663 CAAM_CMD_SZ, &iv_contig);
2664 if (IS_ERR(edesc))
2665 return PTR_ERR(edesc);
2666
2667 /* Create and submit job descriptor*/
2668 init_ablkcipher_job(ctx->sh_desc_enc,
2669 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2670#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002671 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002672 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2673 desc_bytes(edesc->hw_desc), 1);
2674#endif
2675 desc = edesc->hw_desc;
2676 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2677
2678 if (!ret) {
2679 ret = -EINPROGRESS;
2680 } else {
2681 ablkcipher_unmap(jrdev, edesc, req);
2682 kfree(edesc);
2683 }
2684
2685 return ret;
2686}
2687
2688static int ablkcipher_decrypt(struct ablkcipher_request *req)
2689{
2690 struct ablkcipher_edesc *edesc;
2691 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2692 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2693 struct device *jrdev = ctx->jrdev;
2694 bool iv_contig;
2695 u32 *desc;
2696 int ret = 0;
2697
2698 /* allocate extended descriptor */
2699 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2700 CAAM_CMD_SZ, &iv_contig);
2701 if (IS_ERR(edesc))
2702 return PTR_ERR(edesc);
2703
2704 /* Create and submit job descriptor*/
2705 init_ablkcipher_job(ctx->sh_desc_dec,
2706 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2707 desc = edesc->hw_desc;
2708#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002709 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002710 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2711 desc_bytes(edesc->hw_desc), 1);
2712#endif
2713
2714 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2715 if (!ret) {
2716 ret = -EINPROGRESS;
2717 } else {
2718 ablkcipher_unmap(jrdev, edesc, req);
2719 kfree(edesc);
2720 }
2721
2722 return ret;
2723}
2724
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002725/*
2726 * Allocate and map the ablkcipher extended descriptor for
2727 * ablkcipher givencrypt, where the IV is generated by the device.
2728 */
2729static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2730 struct skcipher_givcrypt_request *greq,
2731 int desc_bytes,
2732 bool *iv_contig_out)
2733{
2734 struct ablkcipher_request *req = &greq->creq;
2735 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2736 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2737 struct device *jrdev = ctx->jrdev;
2738 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2739 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2740 GFP_KERNEL : GFP_ATOMIC;
2741 int src_nents, dst_nents = 0, sec4_sg_bytes;
2742 struct ablkcipher_edesc *edesc;
2743 dma_addr_t iv_dma = 0;
2744 bool iv_contig = false;
2745 int sgc;
2746 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002747 int sec4_sg_index;
2748
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002749 src_nents = sg_count(req->src, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002750
2751 if (unlikely(req->dst != req->src))
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002752 dst_nents = sg_count(req->dst, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002753
2754 if (likely(req->src == req->dst)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002755 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2756 DMA_BIDIRECTIONAL);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002757 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002758 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2759 DMA_TO_DEVICE);
2760 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2761 DMA_FROM_DEVICE);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002762 }
2763
2764 /*
2765 * Check if the generated IV can be made contiguous with the
2766 * destination, i.e. the IV buffer ends exactly where req->dst begins.
 * If so, [IV | dst] is handled as one contiguous output region; if not,
 * the IV gets its own entry in the scatterlist (see the layout note
 * below).
2767 */
2768 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2769 if (dma_mapping_error(jrdev, iv_dma)) {
2770 dev_err(jrdev, "unable to map IV\n");
2771 return ERR_PTR(-ENOMEM);
2772 }
2773
2774 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2775 iv_contig = true;
2776 else
2777 dst_nents = dst_nents ? : 1;
2778 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2779 sizeof(struct sec4_sg_entry);
2780
2781 /* allocate space for base edesc and hw desc commands, link tables */
Victoria Milhoandde20ae2015-08-05 11:28:39 -07002782 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2783 GFP_DMA | flags);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002784 if (!edesc) {
2785 dev_err(jrdev, "could not allocate extended descriptor\n");
2786 return ERR_PTR(-ENOMEM);
2787 }
2788
2789 edesc->src_nents = src_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002790 edesc->dst_nents = dst_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002791 edesc->sec4_sg_bytes = sec4_sg_bytes;
2792 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2793 desc_bytes;
2794
2795 sec4_sg_index = 0;
2796 if (src_nents) {
2797 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2798 sec4_sg_index += src_nents;
2799 }
2800
2801 if (!iv_contig) {
2802 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2803 iv_dma, ivsize, 0);
2804 sec4_sg_index += 1;
2805 sg_to_sec4_sg_last(req->dst, dst_nents,
2806 edesc->sec4_sg + sec4_sg_index, 0);
2807 }
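	/*
	 * Layout note (derived from the code above): when the IV is not
	 * contiguous with the destination, the table built here is
	 *
	 *	[ src entries ... (last) ][ IV entry ][ dst entries ... (last) ]
	 *
	 * so the device emits the generated IV immediately ahead of the
	 * ciphertext on the output sequence.
	 */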
2808
2809 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2810 sec4_sg_bytes, DMA_TO_DEVICE);
2811 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2812 dev_err(jrdev, "unable to map S/G table\n");
 kfree(edesc); /* do not leak the edesc on the error path */
2813 return ERR_PTR(-ENOMEM);
2814 }
2815 edesc->iv_dma = iv_dma;
2816
2817#ifdef DEBUG
2818 print_hex_dump(KERN_ERR,
2819 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2820 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2821 sec4_sg_bytes, 1);
2822#endif
2823
2824 *iv_contig_out = iv_contig;
2825 return edesc;
2826}
2827
2828static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2829{
2830 struct ablkcipher_request *req = &creq->creq;
2831 struct ablkcipher_edesc *edesc;
2832 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2833 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2834 struct device *jrdev = ctx->jrdev;
2835 bool iv_contig;
2836 u32 *desc;
2837 int ret = 0;
2838
2839 /* allocate extended descriptor */
2840 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2841 CAAM_CMD_SZ, &iv_contig);
2842 if (IS_ERR(edesc))
2843 return PTR_ERR(edesc);
2844
2845 /* Create and submit job descriptor*/
2846 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2847 edesc, req, iv_contig);
2848#ifdef DEBUG
2849 print_hex_dump(KERN_ERR,
2850 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2851 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2852 desc_bytes(edesc->hw_desc), 1);
2853#endif
2854 desc = edesc->hw_desc;
2855 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2856
2857 if (!ret) {
2858 ret = -EINPROGRESS;
2859 } else {
2860 ablkcipher_unmap(jrdev, edesc, req);
2861 kfree(edesc);
2862 }
2863
2864 return ret;
2865}
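
/*
 * Givencrypt usage sketch (assumption, not driver code): the legacy
 * givcipher interface supplies a buffer that receives the generated IV.
 * Tfm/request setup and error handling are abbreviated; my_cb and res
 * are the hypothetical helpers from the sketch above.
 *
 *	struct skcipher_givcrypt_request *greq;
 *	u8 giv[AES_BLOCK_SIZE];
 *
 *	skcipher_givcrypt_set_callback(greq, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				       my_cb, &res);
 *	skcipher_givcrypt_set_crypt(greq, src, dst, nbytes, giv);
 *	skcipher_givcrypt_set_giv(greq, giv, seqno);
 *	ret = crypto_skcipher_givencrypt(greq);
 *
 * On completion giv[] holds the IV chosen by the shared descriptor and
 * dst holds the ciphertext.
 */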
2866
Yuan Kang885e9e22011-07-15 11:21:41 +08002867#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002868#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08002869struct caam_alg_template {
2870 char name[CRYPTO_MAX_ALG_NAME];
2871 char driver_name[CRYPTO_MAX_ALG_NAME];
2872 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002873 u32 type;
2874 union {
2875 struct ablkcipher_alg ablkcipher;
Yuan Kang885e9e22011-07-15 11:21:41 +08002876 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002877 u32 class1_alg_type;
2878 u32 class2_alg_type;
2879 u32 alg_op;
2880};
2881
2882static struct caam_alg_template driver_algs[] = {
Yuan Kangacdca312011-07-15 11:21:42 +08002883 /* ablkcipher descriptor */
2884 {
2885 .name = "cbc(aes)",
2886 .driver_name = "cbc-aes-caam",
2887 .blocksize = AES_BLOCK_SIZE,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002888 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002889 .template_ablkcipher = {
2890 .setkey = ablkcipher_setkey,
2891 .encrypt = ablkcipher_encrypt,
2892 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002893 .givencrypt = ablkcipher_givencrypt,
2894 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002895 .min_keysize = AES_MIN_KEY_SIZE,
2896 .max_keysize = AES_MAX_KEY_SIZE,
2897 .ivsize = AES_BLOCK_SIZE,
2898 },
2899 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2900 },
2901 {
2902 .name = "cbc(des3_ede)",
2903 .driver_name = "cbc-3des-caam",
2904 .blocksize = DES3_EDE_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002905 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002906 .template_ablkcipher = {
2907 .setkey = ablkcipher_setkey,
2908 .encrypt = ablkcipher_encrypt,
2909 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002910 .givencrypt = ablkcipher_givencrypt,
2911 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002912 .min_keysize = DES3_EDE_KEY_SIZE,
2913 .max_keysize = DES3_EDE_KEY_SIZE,
2914 .ivsize = DES3_EDE_BLOCK_SIZE,
2915 },
2916 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2917 },
2918 {
2919 .name = "cbc(des)",
2920 .driver_name = "cbc-des-caam",
2921 .blocksize = DES_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002922 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002923 .template_ablkcipher = {
2924 .setkey = ablkcipher_setkey,
2925 .encrypt = ablkcipher_encrypt,
2926 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002927 .givencrypt = ablkcipher_givencrypt,
2928 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002929 .min_keysize = DES_KEY_SIZE,
2930 .max_keysize = DES_KEY_SIZE,
2931 .ivsize = DES_BLOCK_SIZE,
2932 },
2933 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02002934 },
2935 {
2936 .name = "ctr(aes)",
2937 .driver_name = "ctr-aes-caam",
2938 .blocksize = 1,
2939 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2940 .template_ablkcipher = {
2941 .setkey = ablkcipher_setkey,
2942 .encrypt = ablkcipher_encrypt,
2943 .decrypt = ablkcipher_decrypt,
2944 .geniv = "chainiv",
2945 .min_keysize = AES_MIN_KEY_SIZE,
2946 .max_keysize = AES_MAX_KEY_SIZE,
2947 .ivsize = AES_BLOCK_SIZE,
2948 },
2949 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002950 },
2951 {
2952 .name = "rfc3686(ctr(aes))",
2953 .driver_name = "rfc3686-ctr-aes-caam",
2954 .blocksize = 1,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002955 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002956 .template_ablkcipher = {
2957 .setkey = ablkcipher_setkey,
2958 .encrypt = ablkcipher_encrypt,
2959 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002960 .givencrypt = ablkcipher_givencrypt,
2961 .geniv = "<built-in>",
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002962 .min_keysize = AES_MIN_KEY_SIZE +
2963 CTR_RFC3686_NONCE_SIZE,
2964 .max_keysize = AES_MAX_KEY_SIZE +
2965 CTR_RFC3686_NONCE_SIZE,
2966 .ivsize = CTR_RFC3686_IV_SIZE,
2967 },
2968 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilec6415a62015-10-02 13:13:18 +03002969 },
2970 {
2971 .name = "xts(aes)",
2972 .driver_name = "xts-aes-caam",
2973 .blocksize = AES_BLOCK_SIZE,
2974 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2975 .template_ablkcipher = {
2976 .setkey = xts_ablkcipher_setkey,
2977 .encrypt = ablkcipher_encrypt,
2978 .decrypt = ablkcipher_decrypt,
2979 .geniv = "eseqiv",
2980 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2981 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2982 .ivsize = AES_BLOCK_SIZE,
2983 },
2984 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2985 },
Kim Phillips8e8ec592011-03-13 16:54:26 +08002986};
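
/*
 * Selection sketch (assumption, illustration only): registered with
 * CAAM_CRA_PRIORITY, these templates normally outrank software
 * implementations, so a generic lookup binds to this backend:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * /proc/crypto is then expected to report "cbc-aes-caam" as the driver
 * backing the "cbc(aes)" name.
 */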
2987
Herbert Xuf2147b82015-06-16 13:54:23 +08002988static struct caam_aead_alg driver_aeads[] = {
2989 {
2990 .aead = {
2991 .base = {
2992 .cra_name = "rfc4106(gcm(aes))",
2993 .cra_driver_name = "rfc4106-gcm-aes-caam",
2994 .cra_blocksize = 1,
2995 },
2996 .setkey = rfc4106_setkey,
2997 .setauthsize = rfc4106_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08002998 .encrypt = ipsec_gcm_encrypt,
2999 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003000 .ivsize = 8,
3001 .maxauthsize = AES_BLOCK_SIZE,
3002 },
3003 .caam = {
3004 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3005 },
3006 },
3007 {
3008 .aead = {
3009 .base = {
3010 .cra_name = "rfc4543(gcm(aes))",
3011 .cra_driver_name = "rfc4543-gcm-aes-caam",
3012 .cra_blocksize = 1,
3013 },
3014 .setkey = rfc4543_setkey,
3015 .setauthsize = rfc4543_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08003016 .encrypt = ipsec_gcm_encrypt,
3017 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003018 .ivsize = 8,
3019 .maxauthsize = AES_BLOCK_SIZE,
3020 },
3021 .caam = {
3022 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3023 },
3024 },
3025 /* Galois Counter Mode */
3026 {
3027 .aead = {
3028 .base = {
3029 .cra_name = "gcm(aes)",
3030 .cra_driver_name = "gcm-aes-caam",
3031 .cra_blocksize = 1,
3032 },
3033 .setkey = gcm_setkey,
3034 .setauthsize = gcm_setauthsize,
3035 .encrypt = gcm_encrypt,
3036 .decrypt = gcm_decrypt,
3037 .ivsize = 12,
3038 .maxauthsize = AES_BLOCK_SIZE,
3039 },
3040 .caam = {
3041 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3042 },
3043 },
Herbert Xu479bcc72015-07-30 17:53:17 +08003044 /* single-pass ipsec_esp descriptor */
3045 {
3046 .aead = {
3047 .base = {
3048 .cra_name = "authenc(hmac(md5),"
3049 "ecb(cipher_null))",
3050 .cra_driver_name = "authenc-hmac-md5-"
3051 "ecb-cipher_null-caam",
3052 .cra_blocksize = NULL_BLOCK_SIZE,
3053 },
3054 .setkey = aead_setkey,
3055 .setauthsize = aead_setauthsize,
3056 .encrypt = aead_encrypt,
3057 .decrypt = aead_decrypt,
3058 .ivsize = NULL_IV_SIZE,
3059 .maxauthsize = MD5_DIGEST_SIZE,
3060 },
3061 .caam = {
3062 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3063 OP_ALG_AAI_HMAC_PRECOMP,
3064 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3065 },
3066 },
3067 {
3068 .aead = {
3069 .base = {
3070 .cra_name = "authenc(hmac(sha1),"
3071 "ecb(cipher_null))",
3072 .cra_driver_name = "authenc-hmac-sha1-"
3073 "ecb-cipher_null-caam",
3074 .cra_blocksize = NULL_BLOCK_SIZE,
3075 },
3076 .setkey = aead_setkey,
3077 .setauthsize = aead_setauthsize,
3078 .encrypt = aead_encrypt,
3079 .decrypt = aead_decrypt,
3080 .ivsize = NULL_IV_SIZE,
3081 .maxauthsize = SHA1_DIGEST_SIZE,
3082 },
3083 .caam = {
3084 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3085 OP_ALG_AAI_HMAC_PRECOMP,
3086 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3087 },
3088 },
3089 {
3090 .aead = {
3091 .base = {
3092 .cra_name = "authenc(hmac(sha224),"
3093 "ecb(cipher_null))",
3094 .cra_driver_name = "authenc-hmac-sha224-"
3095 "ecb-cipher_null-caam",
3096 .cra_blocksize = NULL_BLOCK_SIZE,
3097 },
3098 .setkey = aead_setkey,
3099 .setauthsize = aead_setauthsize,
3100 .encrypt = aead_encrypt,
3101 .decrypt = aead_decrypt,
3102 .ivsize = NULL_IV_SIZE,
3103 .maxauthsize = SHA224_DIGEST_SIZE,
3104 },
3105 .caam = {
3106 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3107 OP_ALG_AAI_HMAC_PRECOMP,
3108 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3109 },
3110 },
3111 {
3112 .aead = {
3113 .base = {
3114 .cra_name = "authenc(hmac(sha256),"
3115 "ecb(cipher_null))",
3116 .cra_driver_name = "authenc-hmac-sha256-"
3117 "ecb-cipher_null-caam",
3118 .cra_blocksize = NULL_BLOCK_SIZE,
3119 },
3120 .setkey = aead_setkey,
3121 .setauthsize = aead_setauthsize,
3122 .encrypt = aead_encrypt,
3123 .decrypt = aead_decrypt,
3124 .ivsize = NULL_IV_SIZE,
3125 .maxauthsize = SHA256_DIGEST_SIZE,
3126 },
3127 .caam = {
3128 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3129 OP_ALG_AAI_HMAC_PRECOMP,
3130 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3131 },
3132 },
3133 {
3134 .aead = {
3135 .base = {
3136 .cra_name = "authenc(hmac(sha384),"
3137 "ecb(cipher_null))",
3138 .cra_driver_name = "authenc-hmac-sha384-"
3139 "ecb-cipher_null-caam",
3140 .cra_blocksize = NULL_BLOCK_SIZE,
3141 },
3142 .setkey = aead_setkey,
3143 .setauthsize = aead_setauthsize,
3144 .encrypt = aead_encrypt,
3145 .decrypt = aead_decrypt,
3146 .ivsize = NULL_IV_SIZE,
3147 .maxauthsize = SHA384_DIGEST_SIZE,
3148 },
3149 .caam = {
3150 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3151 OP_ALG_AAI_HMAC_PRECOMP,
3152 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3153 },
3154 },
3155 {
3156 .aead = {
3157 .base = {
3158 .cra_name = "authenc(hmac(sha512),"
3159 "ecb(cipher_null))",
3160 .cra_driver_name = "authenc-hmac-sha512-"
3161 "ecb-cipher_null-caam",
3162 .cra_blocksize = NULL_BLOCK_SIZE,
3163 },
3164 .setkey = aead_setkey,
3165 .setauthsize = aead_setauthsize,
3166 .encrypt = aead_encrypt,
3167 .decrypt = aead_decrypt,
3168 .ivsize = NULL_IV_SIZE,
3169 .maxauthsize = SHA512_DIGEST_SIZE,
3170 },
3171 .caam = {
3172 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3173 OP_ALG_AAI_HMAC_PRECOMP,
3174 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3175 },
3176 },
3177 {
3178 .aead = {
3179 .base = {
3180 .cra_name = "authenc(hmac(md5),cbc(aes))",
3181 .cra_driver_name = "authenc-hmac-md5-"
3182 "cbc-aes-caam",
3183 .cra_blocksize = AES_BLOCK_SIZE,
3184 },
3185 .setkey = aead_setkey,
3186 .setauthsize = aead_setauthsize,
3187 .encrypt = aead_encrypt,
3188 .decrypt = aead_decrypt,
3189 .ivsize = AES_BLOCK_SIZE,
3190 .maxauthsize = MD5_DIGEST_SIZE,
3191 },
3192 .caam = {
3193 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3194 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3195 OP_ALG_AAI_HMAC_PRECOMP,
3196 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3197 },
3198 },
3199 {
3200 .aead = {
3201 .base = {
3202 .cra_name = "echainiv(authenc(hmac(md5),"
3203 "cbc(aes)))",
3204 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3205 "cbc-aes-caam",
3206 .cra_blocksize = AES_BLOCK_SIZE,
3207 },
3208 .setkey = aead_setkey,
3209 .setauthsize = aead_setauthsize,
3210 .encrypt = aead_encrypt,
3211 .decrypt = aead_givdecrypt,
3212 .ivsize = AES_BLOCK_SIZE,
3213 .maxauthsize = MD5_DIGEST_SIZE,
3214 },
3215 .caam = {
3216 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3217 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3218 OP_ALG_AAI_HMAC_PRECOMP,
3219 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3220 .geniv = true,
3221 },
3222 },
3223 {
3224 .aead = {
3225 .base = {
3226 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3227 .cra_driver_name = "authenc-hmac-sha1-"
3228 "cbc-aes-caam",
3229 .cra_blocksize = AES_BLOCK_SIZE,
3230 },
3231 .setkey = aead_setkey,
3232 .setauthsize = aead_setauthsize,
3233 .encrypt = aead_encrypt,
3234 .decrypt = aead_decrypt,
3235 .ivsize = AES_BLOCK_SIZE,
3236 .maxauthsize = SHA1_DIGEST_SIZE,
3237 },
3238 .caam = {
3239 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3240 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3241 OP_ALG_AAI_HMAC_PRECOMP,
3242 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3243 },
3244 },
3245 {
3246 .aead = {
3247 .base = {
3248 .cra_name = "echainiv(authenc(hmac(sha1),"
3249 "cbc(aes)))",
3250 .cra_driver_name = "echainiv-authenc-"
3251 "hmac-sha1-cbc-aes-caam",
3252 .cra_blocksize = AES_BLOCK_SIZE,
3253 },
3254 .setkey = aead_setkey,
3255 .setauthsize = aead_setauthsize,
3256 .encrypt = aead_encrypt,
3257 .decrypt = aead_givdecrypt,
3258 .ivsize = AES_BLOCK_SIZE,
3259 .maxauthsize = SHA1_DIGEST_SIZE,
3260 },
3261 .caam = {
3262 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3263 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3264 OP_ALG_AAI_HMAC_PRECOMP,
3265 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3266 .geniv = true,
3267 },
3268 },
3269 {
3270 .aead = {
3271 .base = {
3272 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3273 .cra_driver_name = "authenc-hmac-sha224-"
3274 "cbc-aes-caam",
3275 .cra_blocksize = AES_BLOCK_SIZE,
3276 },
3277 .setkey = aead_setkey,
3278 .setauthsize = aead_setauthsize,
3279 .encrypt = aead_encrypt,
3280 .decrypt = aead_decrypt,
3281 .ivsize = AES_BLOCK_SIZE,
3282 .maxauthsize = SHA224_DIGEST_SIZE,
3283 },
3284 .caam = {
3285 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3286 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3287 OP_ALG_AAI_HMAC_PRECOMP,
3288 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3289 },
3290 },
3291 {
3292 .aead = {
3293 .base = {
3294 .cra_name = "echainiv(authenc(hmac(sha224),"
3295 "cbc(aes)))",
3296 .cra_driver_name = "echainiv-authenc-"
3297 "hmac-sha224-cbc-aes-caam",
3298 .cra_blocksize = AES_BLOCK_SIZE,
3299 },
3300 .setkey = aead_setkey,
3301 .setauthsize = aead_setauthsize,
3302 .encrypt = aead_encrypt,
3303 .decrypt = aead_givdecrypt,
3304 .ivsize = AES_BLOCK_SIZE,
3305 .maxauthsize = SHA224_DIGEST_SIZE,
3306 },
3307 .caam = {
3308 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3309 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3310 OP_ALG_AAI_HMAC_PRECOMP,
3311 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3312 .geniv = true,
3313 },
3314 },
3315 {
3316 .aead = {
3317 .base = {
3318 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3319 .cra_driver_name = "authenc-hmac-sha256-"
3320 "cbc-aes-caam",
3321 .cra_blocksize = AES_BLOCK_SIZE,
3322 },
3323 .setkey = aead_setkey,
3324 .setauthsize = aead_setauthsize,
3325 .encrypt = aead_encrypt,
3326 .decrypt = aead_decrypt,
3327 .ivsize = AES_BLOCK_SIZE,
3328 .maxauthsize = SHA256_DIGEST_SIZE,
3329 },
3330 .caam = {
3331 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3332 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3333 OP_ALG_AAI_HMAC_PRECOMP,
3334 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3335 },
3336 },
3337 {
3338 .aead = {
3339 .base = {
3340 .cra_name = "echainiv(authenc(hmac(sha256),"
3341 "cbc(aes)))",
3342 .cra_driver_name = "echainiv-authenc-"
3343 "hmac-sha256-cbc-aes-caam",
3344 .cra_blocksize = AES_BLOCK_SIZE,
3345 },
3346 .setkey = aead_setkey,
3347 .setauthsize = aead_setauthsize,
3348 .encrypt = aead_encrypt,
3349 .decrypt = aead_givdecrypt,
3350 .ivsize = AES_BLOCK_SIZE,
3351 .maxauthsize = SHA256_DIGEST_SIZE,
3352 },
3353 .caam = {
3354 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3355 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3356 OP_ALG_AAI_HMAC_PRECOMP,
3357 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3358 .geniv = true,
3359 },
3360 },
3361 {
3362 .aead = {
3363 .base = {
3364 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3365 .cra_driver_name = "authenc-hmac-sha384-"
3366 "cbc-aes-caam",
3367 .cra_blocksize = AES_BLOCK_SIZE,
3368 },
3369 .setkey = aead_setkey,
3370 .setauthsize = aead_setauthsize,
3371 .encrypt = aead_encrypt,
3372 .decrypt = aead_decrypt,
3373 .ivsize = AES_BLOCK_SIZE,
3374 .maxauthsize = SHA384_DIGEST_SIZE,
3375 },
3376 .caam = {
3377 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3378 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3379 OP_ALG_AAI_HMAC_PRECOMP,
3380 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3381 },
3382 },
3383 {
3384 .aead = {
3385 .base = {
3386 .cra_name = "echainiv(authenc(hmac(sha384),"
3387 "cbc(aes)))",
3388 .cra_driver_name = "echainiv-authenc-"
3389 "hmac-sha384-cbc-aes-caam",
3390 .cra_blocksize = AES_BLOCK_SIZE,
3391 },
3392 .setkey = aead_setkey,
3393 .setauthsize = aead_setauthsize,
3394 .encrypt = aead_encrypt,
3395 .decrypt = aead_givdecrypt,
3396 .ivsize = AES_BLOCK_SIZE,
3397 .maxauthsize = SHA384_DIGEST_SIZE,
3398 },
3399 .caam = {
3400 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3401 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3402 OP_ALG_AAI_HMAC_PRECOMP,
3403 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3404 .geniv = true,
3405 },
3406 },
3407 {
3408 .aead = {
3409 .base = {
3410 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3411 .cra_driver_name = "authenc-hmac-sha512-"
3412 "cbc-aes-caam",
3413 .cra_blocksize = AES_BLOCK_SIZE,
3414 },
3415 .setkey = aead_setkey,
3416 .setauthsize = aead_setauthsize,
3417 .encrypt = aead_encrypt,
3418 .decrypt = aead_decrypt,
3419 .ivsize = AES_BLOCK_SIZE,
3420 .maxauthsize = SHA512_DIGEST_SIZE,
3421 },
3422 .caam = {
3423 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3424 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3425 OP_ALG_AAI_HMAC_PRECOMP,
3426 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3427 },
3428 },
3429 {
3430 .aead = {
3431 .base = {
3432 .cra_name = "echainiv(authenc(hmac(sha512),"
3433 "cbc(aes)))",
3434 .cra_driver_name = "echainiv-authenc-"
3435 "hmac-sha512-cbc-aes-caam",
3436 .cra_blocksize = AES_BLOCK_SIZE,
3437 },
3438 .setkey = aead_setkey,
3439 .setauthsize = aead_setauthsize,
3440 .encrypt = aead_encrypt,
3441 .decrypt = aead_givdecrypt,
3442 .ivsize = AES_BLOCK_SIZE,
3443 .maxauthsize = SHA512_DIGEST_SIZE,
3444 },
3445 .caam = {
3446 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3447 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3448 OP_ALG_AAI_HMAC_PRECOMP,
3449 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3450 .geniv = true,
3451 },
3452 },
3453 {
3454 .aead = {
3455 .base = {
3456 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3457 .cra_driver_name = "authenc-hmac-md5-"
3458 "cbc-des3_ede-caam",
3459 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3460 },
3461 .setkey = aead_setkey,
3462 .setauthsize = aead_setauthsize,
3463 .encrypt = aead_encrypt,
3464 .decrypt = aead_decrypt,
3465 .ivsize = DES3_EDE_BLOCK_SIZE,
3466 .maxauthsize = MD5_DIGEST_SIZE,
3467 },
3468 .caam = {
3469 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3470 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3471 OP_ALG_AAI_HMAC_PRECOMP,
3472 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3473 }
3474 },
3475 {
3476 .aead = {
3477 .base = {
3478 .cra_name = "echainiv(authenc(hmac(md5),"
3479 "cbc(des3_ede)))",
3480 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3481 "cbc-des3_ede-caam",
3482 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3483 },
3484 .setkey = aead_setkey,
3485 .setauthsize = aead_setauthsize,
3486 .encrypt = aead_encrypt,
3487 .decrypt = aead_givdecrypt,
3488 .ivsize = DES3_EDE_BLOCK_SIZE,
3489 .maxauthsize = MD5_DIGEST_SIZE,
3490 },
3491 .caam = {
3492 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3493 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3494 OP_ALG_AAI_HMAC_PRECOMP,
3495 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3496 .geniv = true,
3497 }
3498 },
3499 {
3500 .aead = {
3501 .base = {
3502 .cra_name = "authenc(hmac(sha1),"
3503 "cbc(des3_ede))",
3504 .cra_driver_name = "authenc-hmac-sha1-"
3505 "cbc-des3_ede-caam",
3506 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3507 },
3508 .setkey = aead_setkey,
3509 .setauthsize = aead_setauthsize,
3510 .encrypt = aead_encrypt,
3511 .decrypt = aead_decrypt,
3512 .ivsize = DES3_EDE_BLOCK_SIZE,
3513 .maxauthsize = SHA1_DIGEST_SIZE,
3514 },
3515 .caam = {
3516 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3517 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3518 OP_ALG_AAI_HMAC_PRECOMP,
3519 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3520 },
3521 },
3522 {
3523 .aead = {
3524 .base = {
3525 .cra_name = "echainiv(authenc(hmac(sha1),"
3526 "cbc(des3_ede)))",
3527 .cra_driver_name = "echainiv-authenc-"
3528 "hmac-sha1-"
3529 "cbc-des3_ede-caam",
3530 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3531 },
3532 .setkey = aead_setkey,
3533 .setauthsize = aead_setauthsize,
3534 .encrypt = aead_encrypt,
3535 .decrypt = aead_givdecrypt,
3536 .ivsize = DES3_EDE_BLOCK_SIZE,
3537 .maxauthsize = SHA1_DIGEST_SIZE,
3538 },
3539 .caam = {
3540 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3541 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3542 OP_ALG_AAI_HMAC_PRECOMP,
3543 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3544 .geniv = true,
3545 },
3546 },
3547 {
3548 .aead = {
3549 .base = {
3550 .cra_name = "authenc(hmac(sha224),"
3551 "cbc(des3_ede))",
3552 .cra_driver_name = "authenc-hmac-sha224-"
3553 "cbc-des3_ede-caam",
3554 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3555 },
3556 .setkey = aead_setkey,
3557 .setauthsize = aead_setauthsize,
3558 .encrypt = aead_encrypt,
3559 .decrypt = aead_decrypt,
3560 .ivsize = DES3_EDE_BLOCK_SIZE,
3561 .maxauthsize = SHA224_DIGEST_SIZE,
3562 },
3563 .caam = {
3564 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3565 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3566 OP_ALG_AAI_HMAC_PRECOMP,
3567 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3568 },
3569 },
3570 {
3571 .aead = {
3572 .base = {
3573 .cra_name = "echainiv(authenc(hmac(sha224),"
3574 "cbc(des3_ede)))",
3575 .cra_driver_name = "echainiv-authenc-"
3576 "hmac-sha224-"
3577 "cbc-des3_ede-caam",
3578 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3579 },
3580 .setkey = aead_setkey,
3581 .setauthsize = aead_setauthsize,
3582 .encrypt = aead_encrypt,
3583 .decrypt = aead_givdecrypt,
3584 .ivsize = DES3_EDE_BLOCK_SIZE,
3585 .maxauthsize = SHA224_DIGEST_SIZE,
3586 },
3587 .caam = {
3588 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3589 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3590 OP_ALG_AAI_HMAC_PRECOMP,
3591 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3592 .geniv = true,
3593 },
3594 },
3595 {
3596 .aead = {
3597 .base = {
3598 .cra_name = "authenc(hmac(sha256),"
3599 "cbc(des3_ede))",
3600 .cra_driver_name = "authenc-hmac-sha256-"
3601 "cbc-des3_ede-caam",
3602 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3603 },
3604 .setkey = aead_setkey,
3605 .setauthsize = aead_setauthsize,
3606 .encrypt = aead_encrypt,
3607 .decrypt = aead_decrypt,
3608 .ivsize = DES3_EDE_BLOCK_SIZE,
3609 .maxauthsize = SHA256_DIGEST_SIZE,
3610 },
3611 .caam = {
3612 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3613 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3614 OP_ALG_AAI_HMAC_PRECOMP,
3615 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3616 },
3617 },
3618 {
3619 .aead = {
3620 .base = {
3621 .cra_name = "echainiv(authenc(hmac(sha256),"
3622 "cbc(des3_ede)))",
3623 .cra_driver_name = "echainiv-authenc-"
3624 "hmac-sha256-"
3625 "cbc-des3_ede-caam",
3626 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3627 },
3628 .setkey = aead_setkey,
3629 .setauthsize = aead_setauthsize,
3630 .encrypt = aead_encrypt,
3631 .decrypt = aead_givdecrypt,
3632 .ivsize = DES3_EDE_BLOCK_SIZE,
3633 .maxauthsize = SHA256_DIGEST_SIZE,
3634 },
3635 .caam = {
3636 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3637 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3638 OP_ALG_AAI_HMAC_PRECOMP,
3639 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3640 .geniv = true,
3641 },
3642 },
3643 {
3644 .aead = {
3645 .base = {
3646 .cra_name = "authenc(hmac(sha384),"
3647 "cbc(des3_ede))",
3648 .cra_driver_name = "authenc-hmac-sha384-"
3649 "cbc-des3_ede-caam",
3650 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3651 },
3652 .setkey = aead_setkey,
3653 .setauthsize = aead_setauthsize,
3654 .encrypt = aead_encrypt,
3655 .decrypt = aead_decrypt,
3656 .ivsize = DES3_EDE_BLOCK_SIZE,
3657 .maxauthsize = SHA384_DIGEST_SIZE,
3658 },
3659 .caam = {
3660 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3661 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3662 OP_ALG_AAI_HMAC_PRECOMP,
3663 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3664 },
3665 },
3666 {
3667 .aead = {
3668 .base = {
3669 .cra_name = "echainiv(authenc(hmac(sha384),"
3670 "cbc(des3_ede)))",
3671 .cra_driver_name = "echainiv-authenc-"
3672 "hmac-sha384-"
3673 "cbc-des3_ede-caam",
3674 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3675 },
3676 .setkey = aead_setkey,
3677 .setauthsize = aead_setauthsize,
3678 .encrypt = aead_encrypt,
3679 .decrypt = aead_givdecrypt,
3680 .ivsize = DES3_EDE_BLOCK_SIZE,
3681 .maxauthsize = SHA384_DIGEST_SIZE,
3682 },
3683 .caam = {
3684 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3685 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3686 OP_ALG_AAI_HMAC_PRECOMP,
3687 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3688 .geniv = true,
3689 },
3690 },
3691 {
3692 .aead = {
3693 .base = {
3694 .cra_name = "authenc(hmac(sha512),"
3695 "cbc(des3_ede))",
3696 .cra_driver_name = "authenc-hmac-sha512-"
3697 "cbc-des3_ede-caam",
3698 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3699 },
3700 .setkey = aead_setkey,
3701 .setauthsize = aead_setauthsize,
3702 .encrypt = aead_encrypt,
3703 .decrypt = aead_decrypt,
3704 .ivsize = DES3_EDE_BLOCK_SIZE,
3705 .maxauthsize = SHA512_DIGEST_SIZE,
3706 },
3707 .caam = {
3708 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3709 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3710 OP_ALG_AAI_HMAC_PRECOMP,
3711 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3712 },
3713 },
3714 {
3715 .aead = {
3716 .base = {
3717 .cra_name = "echainiv(authenc(hmac(sha512),"
3718 "cbc(des3_ede)))",
3719 .cra_driver_name = "echainiv-authenc-"
3720 "hmac-sha512-"
3721 "cbc-des3_ede-caam",
3722 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3723 },
3724 .setkey = aead_setkey,
3725 .setauthsize = aead_setauthsize,
3726 .encrypt = aead_encrypt,
3727 .decrypt = aead_givdecrypt,
3728 .ivsize = DES3_EDE_BLOCK_SIZE,
3729 .maxauthsize = SHA512_DIGEST_SIZE,
3730 },
3731 .caam = {
3732 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3733 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3734 OP_ALG_AAI_HMAC_PRECOMP,
3735 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3736 .geniv = true,
3737 },
3738 },
3739 {
3740 .aead = {
3741 .base = {
3742 .cra_name = "authenc(hmac(md5),cbc(des))",
3743 .cra_driver_name = "authenc-hmac-md5-"
3744 "cbc-des-caam",
3745 .cra_blocksize = DES_BLOCK_SIZE,
3746 },
3747 .setkey = aead_setkey,
3748 .setauthsize = aead_setauthsize,
3749 .encrypt = aead_encrypt,
3750 .decrypt = aead_decrypt,
3751 .ivsize = DES_BLOCK_SIZE,
3752 .maxauthsize = MD5_DIGEST_SIZE,
3753 },
3754 .caam = {
3755 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3756 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3757 OP_ALG_AAI_HMAC_PRECOMP,
3758 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3759 },
3760 },
3761 {
3762 .aead = {
3763 .base = {
3764 .cra_name = "echainiv(authenc(hmac(md5),"
3765 "cbc(des)))",
3766 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3767 "cbc-des-caam",
3768 .cra_blocksize = DES_BLOCK_SIZE,
3769 },
3770 .setkey = aead_setkey,
3771 .setauthsize = aead_setauthsize,
3772 .encrypt = aead_encrypt,
3773 .decrypt = aead_givdecrypt,
3774 .ivsize = DES_BLOCK_SIZE,
3775 .maxauthsize = MD5_DIGEST_SIZE,
3776 },
3777 .caam = {
3778 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3779 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3780 OP_ALG_AAI_HMAC_PRECOMP,
3781 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3782 .geniv = true,
3783 },
3784 },
3785 {
3786 .aead = {
3787 .base = {
3788 .cra_name = "authenc(hmac(sha1),cbc(des))",
3789 .cra_driver_name = "authenc-hmac-sha1-"
3790 "cbc-des-caam",
3791 .cra_blocksize = DES_BLOCK_SIZE,
3792 },
3793 .setkey = aead_setkey,
3794 .setauthsize = aead_setauthsize,
3795 .encrypt = aead_encrypt,
3796 .decrypt = aead_decrypt,
3797 .ivsize = DES_BLOCK_SIZE,
3798 .maxauthsize = SHA1_DIGEST_SIZE,
3799 },
3800 .caam = {
3801 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3802 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3803 OP_ALG_AAI_HMAC_PRECOMP,
3804 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3805 },
3806 },
3807 {
3808 .aead = {
3809 .base = {
3810 .cra_name = "echainiv(authenc(hmac(sha1),"
3811 "cbc(des)))",
3812 .cra_driver_name = "echainiv-authenc-"
3813 "hmac-sha1-cbc-des-caam",
3814 .cra_blocksize = DES_BLOCK_SIZE,
3815 },
3816 .setkey = aead_setkey,
3817 .setauthsize = aead_setauthsize,
3818 .encrypt = aead_encrypt,
3819 .decrypt = aead_givdecrypt,
3820 .ivsize = DES_BLOCK_SIZE,
3821 .maxauthsize = SHA1_DIGEST_SIZE,
3822 },
3823 .caam = {
3824 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3825 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3826 OP_ALG_AAI_HMAC_PRECOMP,
3827 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3828 .geniv = true,
3829 },
3830 },
3831 {
3832 .aead = {
3833 .base = {
3834 .cra_name = "authenc(hmac(sha224),cbc(des))",
3835 .cra_driver_name = "authenc-hmac-sha224-"
3836 "cbc-des-caam",
3837 .cra_blocksize = DES_BLOCK_SIZE,
3838 },
3839 .setkey = aead_setkey,
3840 .setauthsize = aead_setauthsize,
3841 .encrypt = aead_encrypt,
3842 .decrypt = aead_decrypt,
3843 .ivsize = DES_BLOCK_SIZE,
3844 .maxauthsize = SHA224_DIGEST_SIZE,
3845 },
3846 .caam = {
3847 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3848 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3849 OP_ALG_AAI_HMAC_PRECOMP,
3850 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3851 },
3852 },
3853 {
3854 .aead = {
3855 .base = {
3856 .cra_name = "echainiv(authenc(hmac(sha224),"
3857 "cbc(des)))",
3858 .cra_driver_name = "echainiv-authenc-"
3859 "hmac-sha224-cbc-des-caam",
3860 .cra_blocksize = DES_BLOCK_SIZE,
3861 },
3862 .setkey = aead_setkey,
3863 .setauthsize = aead_setauthsize,
3864 .encrypt = aead_encrypt,
3865 .decrypt = aead_givdecrypt,
3866 .ivsize = DES_BLOCK_SIZE,
3867 .maxauthsize = SHA224_DIGEST_SIZE,
3868 },
3869 .caam = {
3870 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3871 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3872 OP_ALG_AAI_HMAC_PRECOMP,
3873 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3874 .geniv = true,
3875 },
3876 },
3877 {
3878 .aead = {
3879 .base = {
3880 .cra_name = "authenc(hmac(sha256),cbc(des))",
3881 .cra_driver_name = "authenc-hmac-sha256-"
3882 "cbc-des-caam",
3883 .cra_blocksize = DES_BLOCK_SIZE,
3884 },
3885 .setkey = aead_setkey,
3886 .setauthsize = aead_setauthsize,
3887 .encrypt = aead_encrypt,
3888 .decrypt = aead_decrypt,
3889 .ivsize = DES_BLOCK_SIZE,
3890 .maxauthsize = SHA256_DIGEST_SIZE,
3891 },
3892 .caam = {
3893 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3894 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3895 OP_ALG_AAI_HMAC_PRECOMP,
3896 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3897 },
3898 },
3899 {
3900 .aead = {
3901 .base = {
3902 .cra_name = "echainiv(authenc(hmac(sha256),"
3903 "cbc(des)))",
3904 .cra_driver_name = "echainiv-authenc-"
3905 "hmac-sha256-cbc-des-caam",
3906 .cra_blocksize = DES_BLOCK_SIZE,
3907 },
3908 .setkey = aead_setkey,
3909 .setauthsize = aead_setauthsize,
3910 .encrypt = aead_encrypt,
3911 .decrypt = aead_givdecrypt,
3912 .ivsize = DES_BLOCK_SIZE,
3913 .maxauthsize = SHA256_DIGEST_SIZE,
3914 },
3915 .caam = {
3916 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3917 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3918 OP_ALG_AAI_HMAC_PRECOMP,
3919 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3920 .geniv = true,
3921 },
3922 },
3923 {
3924 .aead = {
3925 .base = {
3926 .cra_name = "authenc(hmac(sha384),cbc(des))",
3927 .cra_driver_name = "authenc-hmac-sha384-"
3928 "cbc-des-caam",
3929 .cra_blocksize = DES_BLOCK_SIZE,
3930 },
3931 .setkey = aead_setkey,
3932 .setauthsize = aead_setauthsize,
3933 .encrypt = aead_encrypt,
3934 .decrypt = aead_decrypt,
3935 .ivsize = DES_BLOCK_SIZE,
3936 .maxauthsize = SHA384_DIGEST_SIZE,
3937 },
3938 .caam = {
3939 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3940 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3941 OP_ALG_AAI_HMAC_PRECOMP,
3942 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3943 },
3944 },
3945 {
3946 .aead = {
3947 .base = {
3948 .cra_name = "echainiv(authenc(hmac(sha384),"
3949 "cbc(des)))",
3950 .cra_driver_name = "echainiv-authenc-"
3951 "hmac-sha384-cbc-des-caam",
3952 .cra_blocksize = DES_BLOCK_SIZE,
3953 },
3954 .setkey = aead_setkey,
3955 .setauthsize = aead_setauthsize,
3956 .encrypt = aead_encrypt,
3957 .decrypt = aead_givdecrypt,
3958 .ivsize = DES_BLOCK_SIZE,
3959 .maxauthsize = SHA384_DIGEST_SIZE,
3960 },
3961 .caam = {
3962 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3963 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3964 OP_ALG_AAI_HMAC_PRECOMP,
3965 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3966 .geniv = true,
3967 },
3968 },
3969 {
3970 .aead = {
3971 .base = {
3972 .cra_name = "authenc(hmac(sha512),cbc(des))",
3973 .cra_driver_name = "authenc-hmac-sha512-"
3974 "cbc-des-caam",
3975 .cra_blocksize = DES_BLOCK_SIZE,
3976 },
3977 .setkey = aead_setkey,
3978 .setauthsize = aead_setauthsize,
3979 .encrypt = aead_encrypt,
3980 .decrypt = aead_decrypt,
3981 .ivsize = DES_BLOCK_SIZE,
3982 .maxauthsize = SHA512_DIGEST_SIZE,
3983 },
3984 .caam = {
3985 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3986 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3987 OP_ALG_AAI_HMAC_PRECOMP,
3988 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3989 },
3990 },
3991 {
3992 .aead = {
3993 .base = {
3994 .cra_name = "echainiv(authenc(hmac(sha512),"
3995 "cbc(des)))",
3996 .cra_driver_name = "echainiv-authenc-"
3997 "hmac-sha512-cbc-des-caam",
3998 .cra_blocksize = DES_BLOCK_SIZE,
3999 },
4000 .setkey = aead_setkey,
4001 .setauthsize = aead_setauthsize,
4002 .encrypt = aead_encrypt,
4003 .decrypt = aead_givdecrypt,
4004 .ivsize = DES_BLOCK_SIZE,
4005 .maxauthsize = SHA512_DIGEST_SIZE,
4006 },
4007 .caam = {
4008 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4009 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4010 OP_ALG_AAI_HMAC_PRECOMP,
4011 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4012 .geniv = true,
4013 },
4014 },
4015 {
4016 .aead = {
4017 .base = {
4018 .cra_name = "authenc(hmac(md5),"
4019 "rfc3686(ctr(aes)))",
4020 .cra_driver_name = "authenc-hmac-md5-"
4021 "rfc3686-ctr-aes-caam",
4022 .cra_blocksize = 1,
4023 },
4024 .setkey = aead_setkey,
4025 .setauthsize = aead_setauthsize,
4026 .encrypt = aead_encrypt,
4027 .decrypt = aead_decrypt,
4028 .ivsize = CTR_RFC3686_IV_SIZE,
4029 .maxauthsize = MD5_DIGEST_SIZE,
4030 },
4031 .caam = {
4032 .class1_alg_type = OP_ALG_ALGSEL_AES |
4033 OP_ALG_AAI_CTR_MOD128,
4034 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4035 OP_ALG_AAI_HMAC_PRECOMP,
4036 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4037 .rfc3686 = true,
4038 },
4039 },
4040 {
4041 .aead = {
4042 .base = {
4043 .cra_name = "seqiv(authenc("
4044 "hmac(md5),rfc3686(ctr(aes))))",
4045 .cra_driver_name = "seqiv-authenc-hmac-md5-"
4046 "rfc3686-ctr-aes-caam",
4047 .cra_blocksize = 1,
4048 },
4049 .setkey = aead_setkey,
4050 .setauthsize = aead_setauthsize,
4051 .encrypt = aead_encrypt,
4052 .decrypt = aead_givdecrypt,
4053 .ivsize = CTR_RFC3686_IV_SIZE,
4054 .maxauthsize = MD5_DIGEST_SIZE,
4055 },
4056 .caam = {
4057 .class1_alg_type = OP_ALG_ALGSEL_AES |
4058 OP_ALG_AAI_CTR_MOD128,
4059 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4060 OP_ALG_AAI_HMAC_PRECOMP,
4061 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4062 .rfc3686 = true,
4063 .geniv = true,
4064 },
4065 },
4066 {
4067 .aead = {
4068 .base = {
4069 .cra_name = "authenc(hmac(sha1),"
4070 "rfc3686(ctr(aes)))",
4071 .cra_driver_name = "authenc-hmac-sha1-"
4072 "rfc3686-ctr-aes-caam",
4073 .cra_blocksize = 1,
4074 },
4075 .setkey = aead_setkey,
4076 .setauthsize = aead_setauthsize,
4077 .encrypt = aead_encrypt,
4078 .decrypt = aead_decrypt,
4079 .ivsize = CTR_RFC3686_IV_SIZE,
4080 .maxauthsize = SHA1_DIGEST_SIZE,
4081 },
4082 .caam = {
4083 .class1_alg_type = OP_ALG_ALGSEL_AES |
4084 OP_ALG_AAI_CTR_MOD128,
4085 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4086 OP_ALG_AAI_HMAC_PRECOMP,
4087 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4088 .rfc3686 = true,
4089 },
4090 },
4091 {
4092 .aead = {
4093 .base = {
4094 .cra_name = "seqiv(authenc("
4095 "hmac(sha1),rfc3686(ctr(aes))))",
4096 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
4097 "rfc3686-ctr-aes-caam",
4098 .cra_blocksize = 1,
4099 },
4100 .setkey = aead_setkey,
4101 .setauthsize = aead_setauthsize,
4102 .encrypt = aead_encrypt,
4103 .decrypt = aead_givdecrypt,
4104 .ivsize = CTR_RFC3686_IV_SIZE,
4105 .maxauthsize = SHA1_DIGEST_SIZE,
4106 },
4107 .caam = {
4108 .class1_alg_type = OP_ALG_ALGSEL_AES |
4109 OP_ALG_AAI_CTR_MOD128,
4110 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4111 OP_ALG_AAI_HMAC_PRECOMP,
4112 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4113 .rfc3686 = true,
4114 .geniv = true,
4115 },
4116 },
4117 {
4118 .aead = {
4119 .base = {
4120 .cra_name = "authenc(hmac(sha224),"
4121 "rfc3686(ctr(aes)))",
4122 .cra_driver_name = "authenc-hmac-sha224-"
4123 "rfc3686-ctr-aes-caam",
4124 .cra_blocksize = 1,
4125 },
4126 .setkey = aead_setkey,
4127 .setauthsize = aead_setauthsize,
4128 .encrypt = aead_encrypt,
4129 .decrypt = aead_decrypt,
4130 .ivsize = CTR_RFC3686_IV_SIZE,
4131 .maxauthsize = SHA224_DIGEST_SIZE,
4132 },
4133 .caam = {
4134 .class1_alg_type = OP_ALG_ALGSEL_AES |
4135 OP_ALG_AAI_CTR_MOD128,
4136 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4137 OP_ALG_AAI_HMAC_PRECOMP,
4138 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4139 .rfc3686 = true,
4140 },
4141 },
4142 {
4143 .aead = {
4144 .base = {
4145 .cra_name = "seqiv(authenc("
4146 "hmac(sha224),rfc3686(ctr(aes))))",
4147 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
4148 "rfc3686-ctr-aes-caam",
4149 .cra_blocksize = 1,
4150 },
4151 .setkey = aead_setkey,
4152 .setauthsize = aead_setauthsize,
4153 .encrypt = aead_encrypt,
4154 .decrypt = aead_givdecrypt,
4155 .ivsize = CTR_RFC3686_IV_SIZE,
4156 .maxauthsize = SHA224_DIGEST_SIZE,
4157 },
4158 .caam = {
4159 .class1_alg_type = OP_ALG_ALGSEL_AES |
4160 OP_ALG_AAI_CTR_MOD128,
4161 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4162 OP_ALG_AAI_HMAC_PRECOMP,
4163 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4164 .rfc3686 = true,
4165 .geniv = true,
4166 },
4167 },
4168 {
4169 .aead = {
4170 .base = {
4171 .cra_name = "authenc(hmac(sha256),"
4172 "rfc3686(ctr(aes)))",
4173 .cra_driver_name = "authenc-hmac-sha256-"
4174 "rfc3686-ctr-aes-caam",
4175 .cra_blocksize = 1,
4176 },
4177 .setkey = aead_setkey,
4178 .setauthsize = aead_setauthsize,
4179 .encrypt = aead_encrypt,
4180 .decrypt = aead_decrypt,
4181 .ivsize = CTR_RFC3686_IV_SIZE,
4182 .maxauthsize = SHA256_DIGEST_SIZE,
4183 },
4184 .caam = {
4185 .class1_alg_type = OP_ALG_ALGSEL_AES |
4186 OP_ALG_AAI_CTR_MOD128,
4187 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4188 OP_ALG_AAI_HMAC_PRECOMP,
4189 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4190 .rfc3686 = true,
4191 },
4192 },
4193 {
4194 .aead = {
4195 .base = {
4196 .cra_name = "seqiv(authenc(hmac(sha256),"
4197 "rfc3686(ctr(aes))))",
4198 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
4199 "rfc3686-ctr-aes-caam",
4200 .cra_blocksize = 1,
4201 },
4202 .setkey = aead_setkey,
4203 .setauthsize = aead_setauthsize,
4204 .encrypt = aead_encrypt,
4205 .decrypt = aead_givdecrypt,
4206 .ivsize = CTR_RFC3686_IV_SIZE,
4207 .maxauthsize = SHA256_DIGEST_SIZE,
4208 },
4209 .caam = {
4210 .class1_alg_type = OP_ALG_ALGSEL_AES |
4211 OP_ALG_AAI_CTR_MOD128,
4212 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4213 OP_ALG_AAI_HMAC_PRECOMP,
4214 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4215 .rfc3686 = true,
4216 .geniv = true,
4217 },
4218 },
4219 {
4220 .aead = {
4221 .base = {
4222 .cra_name = "authenc(hmac(sha384),"
4223 "rfc3686(ctr(aes)))",
4224 .cra_driver_name = "authenc-hmac-sha384-"
4225 "rfc3686-ctr-aes-caam",
4226 .cra_blocksize = 1,
4227 },
4228 .setkey = aead_setkey,
4229 .setauthsize = aead_setauthsize,
4230 .encrypt = aead_encrypt,
4231 .decrypt = aead_decrypt,
4232 .ivsize = CTR_RFC3686_IV_SIZE,
4233 .maxauthsize = SHA384_DIGEST_SIZE,
4234 },
4235 .caam = {
4236 .class1_alg_type = OP_ALG_ALGSEL_AES |
4237 OP_ALG_AAI_CTR_MOD128,
4238 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4239 OP_ALG_AAI_HMAC_PRECOMP,
4240 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4241 .rfc3686 = true,
4242 },
4243 },
4244 {
4245 .aead = {
4246 .base = {
4247 .cra_name = "seqiv(authenc(hmac(sha384),"
4248 "rfc3686(ctr(aes))))",
4249 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
4250 "rfc3686-ctr-aes-caam",
4251 .cra_blocksize = 1,
4252 },
4253 .setkey = aead_setkey,
4254 .setauthsize = aead_setauthsize,
4255 .encrypt = aead_encrypt,
4256 .decrypt = aead_givdecrypt,
4257 .ivsize = CTR_RFC3686_IV_SIZE,
4258 .maxauthsize = SHA384_DIGEST_SIZE,
4259 },
4260 .caam = {
4261 .class1_alg_type = OP_ALG_ALGSEL_AES |
4262 OP_ALG_AAI_CTR_MOD128,
4263 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4264 OP_ALG_AAI_HMAC_PRECOMP,
4265 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4266 .rfc3686 = true,
4267 .geniv = true,
4268 },
4269 },
4270 {
4271 .aead = {
4272 .base = {
4273 .cra_name = "authenc(hmac(sha512),"
4274 "rfc3686(ctr(aes)))",
4275 .cra_driver_name = "authenc-hmac-sha512-"
4276 "rfc3686-ctr-aes-caam",
4277 .cra_blocksize = 1,
4278 },
4279 .setkey = aead_setkey,
4280 .setauthsize = aead_setauthsize,
4281 .encrypt = aead_encrypt,
4282 .decrypt = aead_decrypt,
4283 .ivsize = CTR_RFC3686_IV_SIZE,
4284 .maxauthsize = SHA512_DIGEST_SIZE,
4285 },
4286 .caam = {
4287 .class1_alg_type = OP_ALG_ALGSEL_AES |
4288 OP_ALG_AAI_CTR_MOD128,
4289 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4290 OP_ALG_AAI_HMAC_PRECOMP,
4291 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4292 .rfc3686 = true,
4293 },
4294 },
4295 {
4296 .aead = {
4297 .base = {
4298 .cra_name = "seqiv(authenc(hmac(sha512),"
4299 "rfc3686(ctr(aes))))",
4300 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
4301 "rfc3686-ctr-aes-caam",
4302 .cra_blocksize = 1,
4303 },
4304 .setkey = aead_setkey,
4305 .setauthsize = aead_setauthsize,
4306 .encrypt = aead_encrypt,
4307 .decrypt = aead_givdecrypt,
4308 .ivsize = CTR_RFC3686_IV_SIZE,
4309 .maxauthsize = SHA512_DIGEST_SIZE,
4310 },
4311 .caam = {
4312 .class1_alg_type = OP_ALG_ALGSEL_AES |
4313 OP_ALG_AAI_CTR_MOD128,
4314 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4315 OP_ALG_AAI_HMAC_PRECOMP,
4316 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4317 .rfc3686 = true,
4318 .geniv = true,
4319 },
4320 },
Herbert Xuf2147b82015-06-16 13:54:23 +08004321};
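
/*
 * AEAD usage sketch (assumption, not part of this driver): the entries
 * above are reached through the generic AEAD API, e.g. for "gcm(aes)".
 * Buffer setup and error handling are abbreviated; my_cb and res are
 * the hypothetical helpers sketched earlier.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, 0, my_cb, &res);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	ret = crypto_aead_encrypt(req);
 */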
4322
4323struct caam_crypto_alg {
4324 struct crypto_alg crypto_alg;
4325 struct list_head entry;
4326 struct caam_alg_entry caam;
4327};
4328
4329static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4330{
4331 ctx->jrdev = caam_jr_alloc();
4332 if (IS_ERR(ctx->jrdev)) {
4333 pr_err("Job Ring Device allocation for transform failed\n");
4334 return PTR_ERR(ctx->jrdev);
4335 }
4336
4337 /* copy descriptor header template value */
4338 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4339 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4340 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4341
4342 return 0;
4343}
4344
Kim Phillips8e8ec592011-03-13 16:54:26 +08004345static int caam_cra_init(struct crypto_tfm *tfm)
4346{
4347 struct crypto_alg *alg = tfm->__crt_alg;
4348 struct caam_crypto_alg *caam_alg =
4349 container_of(alg, struct caam_crypto_alg, crypto_alg);
4350 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004351
Herbert Xuf2147b82015-06-16 13:54:23 +08004352 return caam_init_common(ctx, &caam_alg->caam);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004353}
4354
Herbert Xuf2147b82015-06-16 13:54:23 +08004355static int caam_aead_init(struct crypto_aead *tfm)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004356{
Herbert Xuf2147b82015-06-16 13:54:23 +08004357 struct aead_alg *alg = crypto_aead_alg(tfm);
4358 struct caam_aead_alg *caam_alg =
4359 container_of(alg, struct caam_aead_alg, aead);
4360 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004361
Herbert Xuf2147b82015-06-16 13:54:23 +08004362 return caam_init_common(ctx, &caam_alg->caam);
4363}
4364
4365static void caam_exit_common(struct caam_ctx *ctx)
4366{
Yuan Kang1acebad2011-07-15 11:21:42 +08004367 if (ctx->sh_desc_enc_dma &&
4368 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4369 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4370 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4371 if (ctx->sh_desc_dec_dma &&
4372 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4373 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4374 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4375 if (ctx->sh_desc_givenc_dma &&
4376 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4377 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4378 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05004379 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02004380 if (ctx->key_dma &&
4381 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4382 dma_unmap_single(ctx->jrdev, ctx->key_dma,
4383 ctx->enckeylen + ctx->split_key_pad_len,
4384 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304385
4386 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004387}
4388
Herbert Xuf2147b82015-06-16 13:54:23 +08004389static void caam_cra_exit(struct crypto_tfm *tfm)
4390{
4391 caam_exit_common(crypto_tfm_ctx(tfm));
4392}
4393
4394static void caam_aead_exit(struct crypto_aead *tfm)
4395{
4396 caam_exit_common(crypto_aead_ctx(tfm));
4397}
4398
Kim Phillips8e8ec592011-03-13 16:54:26 +08004399static void __exit caam_algapi_exit(void)
4400{
Kim Phillips8e8ec592011-03-13 16:54:26 +08004402 struct caam_crypto_alg *t_alg, *n;
Herbert Xuf2147b82015-06-16 13:54:23 +08004403 int i;
4404
4405 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4406 struct caam_aead_alg *t_alg = driver_aeads + i;
4407
4408 if (t_alg->registered)
4409 crypto_unregister_aead(&t_alg->aead);
4410 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004411
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304412 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004413 return;
4414
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304415 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08004416 crypto_unregister_alg(&t_alg->crypto_alg);
4417 list_del(&t_alg->entry);
4418 kfree(t_alg);
4419 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004420}
4421
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304422static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08004423 *template)
4424{
4425 struct caam_crypto_alg *t_alg;
4426 struct crypto_alg *alg;
4427
Fabio Estevam9c4f9732015-08-21 13:52:00 -03004428 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004429 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304430 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08004431 return ERR_PTR(-ENOMEM);
4432 }
4433
4434 alg = &t_alg->crypto_alg;
4435
4436 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4437 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4438 template->driver_name);
4439 alg->cra_module = THIS_MODULE;
4440 alg->cra_init = caam_cra_init;
4441 alg->cra_exit = caam_cra_exit;
4442 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004443 alg->cra_blocksize = template->blocksize;
4444 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004445 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01004446 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4447 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08004448 switch (template->type) {
Catalin Vasile7222d1a2014-10-31 12:45:38 +02004449 case CRYPTO_ALG_TYPE_GIVCIPHER:
4450 alg->cra_type = &crypto_givcipher_type;
4451 alg->cra_ablkcipher = template->template_ablkcipher;
4452 break;
Yuan Kangacdca312011-07-15 11:21:42 +08004453 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4454 alg->cra_type = &crypto_ablkcipher_type;
4455 alg->cra_ablkcipher = template->template_ablkcipher;
4456 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08004457 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004458
Herbert Xuf2147b82015-06-16 13:54:23 +08004459 t_alg->caam.class1_alg_type = template->class1_alg_type;
4460 t_alg->caam.class2_alg_type = template->class2_alg_type;
4461 t_alg->caam.alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004462
4463 return t_alg;
4464}
4465
Herbert Xuf2147b82015-06-16 13:54:23 +08004466static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4467{
4468 struct aead_alg *alg = &t_alg->aead;
4469
4470 alg->base.cra_module = THIS_MODULE;
4471 alg->base.cra_priority = CAAM_CRA_PRIORITY;
4472 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
Herbert Xu5e4b8c12015-08-13 17:29:06 +08004473 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
Herbert Xuf2147b82015-06-16 13:54:23 +08004474
4475 alg->init = caam_aead_init;
4476 alg->exit = caam_aead_exit;
4477}
4478
Kim Phillips8e8ec592011-03-13 16:54:26 +08004479static int __init caam_algapi_init(void)
4480{
Ruchika Gupta35af6402014-07-07 10:42:12 +05304481 struct device_node *dev_node;
4482 struct platform_device *pdev;
4483 struct device *ctrldev;
Victoria Milhoanbf834902015-08-05 11:28:48 -07004484 struct caam_drv_private *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004485 int i = 0, err = 0;
Victoria Milhoanbf834902015-08-05 11:28:48 -07004486 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4487 unsigned int md_limit = SHA512_DIGEST_SIZE;
Herbert Xuf2147b82015-06-16 13:54:23 +08004488 bool registered = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004489
Ruchika Gupta35af6402014-07-07 10:42:12 +05304490 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4491 if (!dev_node) {
4492 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4493 if (!dev_node)
4494 return -ENODEV;
4495 }
4496
4497 pdev = of_find_device_by_node(dev_node);
4498 if (!pdev) {
4499 of_node_put(dev_node);
4500 return -ENODEV;
4501 }
4502
4503 ctrldev = &pdev->dev;
4504 priv = dev_get_drvdata(ctrldev);
4505 of_node_put(dev_node);
4506
4507 /*
4508 * If priv is NULL, it's probably because the caam driver wasn't
4509 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4510 */
4511 if (!priv)
4512 return -ENODEV;
4513
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304515 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004516
Victoria Milhoanbf834902015-08-05 11:28:48 -07004517 /*
4518 * Register crypto algorithms the device supports.
4519 * First, detect presence and attributes of DES, AES, and MD blocks.
4520 */
4521 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4522 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4523 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4524 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4525 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004526
Victoria Milhoanbf834902015-08-05 11:28:48 -07004527 /* If MD is present, limit digest size based on LP256 */
4528 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4529 md_limit = SHA256_DIGEST_SIZE;
4530
4531 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4532 struct caam_crypto_alg *t_alg;
4533 struct caam_alg_template *alg = driver_algs + i;
4534 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4535
4536 /* Skip DES algorithms if not supported by device */
4537 if (!des_inst &&
4538 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4539 (alg_sel == OP_ALG_ALGSEL_DES)))
4540 continue;
4541
4542 /* Skip AES algorithms if not supported by device */
4543 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4544 continue;
4545
4546 t_alg = caam_alg_alloc(alg);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004547 if (IS_ERR(t_alg)) {
4548 err = PTR_ERR(t_alg);
Victoria Milhoanbf834902015-08-05 11:28:48 -07004549 pr_warn("%s alg allocation failed\n", alg->driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004550 continue;
4551 }
4552
4553 err = crypto_register_alg(&t_alg->crypto_alg);
4554 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304555 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08004556 t_alg->crypto_alg.cra_driver_name);
4557 kfree(t_alg);
Herbert Xuf2147b82015-06-16 13:54:23 +08004558 continue;
4559 }
4560
4561 list_add_tail(&t_alg->entry, &alg_list);
4562 registered = true;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004563 }
Herbert Xuf2147b82015-06-16 13:54:23 +08004564
4565 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4566 struct caam_aead_alg *t_alg = driver_aeads + i;
Victoria Milhoanbf834902015-08-05 11:28:48 -07004567 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4568 OP_ALG_ALGSEL_MASK;
4569 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4570 OP_ALG_ALGSEL_MASK;
4571 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4572
4573 /* Skip DES algorithms if not supported by device */
4574 if (!des_inst &&
4575 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4576 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4577 continue;
4578
4579 /* Skip AES algorithms if not supported by device */
4580 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4581 continue;
4582
4583 /*
4584 * Check support for AES algorithms not available
4585 * on LP devices.
4586 */
4587 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4588 if (alg_aai == OP_ALG_AAI_GCM)
4589 continue;
4590
4591 /*
4592 * Skip algorithms requiring message digests
4593 * if MD or MD size is not supported by device.
4594 */
4595 if (c2_alg_sel &&
4596 (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4597 continue;
Herbert Xuf2147b82015-06-16 13:54:23 +08004598
4599 caam_aead_alg_init(t_alg);
4600
4601 err = crypto_register_aead(&t_alg->aead);
4602 if (err) {
4603 pr_warn("%s alg registration failed\n",
4604 t_alg->aead.base.cra_driver_name);
4605 continue;
4606 }
4607
4608 t_alg->registered = true;
4609 registered = true;
4610 }
4611
4612 if (registered)
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304613 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08004614
4615 return err;
4616}
4617
4618module_init(caam_algapi_init);
4619module_exit(caam_algapi_exit);
4620
4621MODULE_LICENSE("GPL");
4622MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4623MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");