/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

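/*
 * Illustrative sketch only (deliberately compiled out): roughly how a job
 * descriptor with the layout pictured in the header comment could be built
 * with the desc_constr.h helpers. The pointer/length parameters and the
 * zero option flags are placeholder assumptions, not what the driver
 * actually passes for any particular request type.
 */
#if 0
static void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma, int sh_len,
                             dma_addr_t src_dma, unsigned int in_len,
                             dma_addr_t dst_dma, unsigned int out_len)
{
        /* Header + ShareDesc pointer */
        init_job_desc_shared(desc, sh_desc_dma, sh_len,
                             HDR_SHARE_DEFER | HDR_REVERSE);
        /* SEQ_OUT_PTR: output buffer and length */
        append_seq_out_ptr(desc, dst_dma, out_len, 0);
        /* SEQ_IN_PTR: input buffer and length */
        append_seq_in_ptr(desc, src_dma, in_len, 0);
}
#endif
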
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY               3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
                                         CTR_RFC3686_NONCE_SIZE + \
                                         SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH              16

#define AEAD_DESC_JOB_IO_LEN            (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN             (AEAD_DESC_JOB_IO_LEN + \
                                         CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN         (AEAD_DESC_JOB_IO_LEN + \
                                         CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN       (4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE             (3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN          (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN          (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE                   (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN                (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN                (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN            (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN            (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN            (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN            (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES             (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

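/*
 * Worked example of the descriptor budget (illustrative, assuming the
 * CAAM_CMD_SZ == 4 command size, a 64-word descriptor buffer and 32-bit
 * DMA addresses): CAAM_DESC_BYTES_MAX is then 256 bytes and DESC_JOB_IO_LEN
 * works out to 5 * 4 + 3 * 4 = 32 bytes, so DESC_MAX_USED_BYTES =
 * 256 - 32 = 224 bytes, i.e. DESC_MAX_USED_LEN = 56 words are left for a
 * shared descriptor.
 */
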
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
                        int prefix_type, int rowsize, int groupsize,
                        struct scatterlist *sg, size_t tlen, bool ascii)
{
        struct scatterlist *it;
        void *it_page;
        size_t len;
        void *buf;

        for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
                /*
                 * make sure the scatterlist's page
                 * has a valid virtual memory mapping
                 */
                it_page = kmap_atomic(sg_page(it));
                if (unlikely(!it_page)) {
                        printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
                        return;
                }

                buf = it_page + it->offset;
                len = min_t(size_t, tlen, it->length);
                print_hex_dump(level, prefix_str, prefix_type, rowsize,
                               groupsize, buf, len, ascii);
                tlen -= len;

                kunmap_atomic(it_page);
        }
}
#endif

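/*
 * Illustrative usage only (deliberately compiled out): dumping a request's
 * source scatterlist under DEBUG. "req" is an assumed aead_request here,
 * and the dumped length is a placeholder.
 */
#if 0
        dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
                    req->assoclen + req->cryptlen, false);
#endif
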
static struct list_head alg_list;

struct caam_alg_entry {
        int class1_alg_type;
        int class2_alg_type;
        int alg_op;
        bool rfc3686;
        bool geniv;
};

struct caam_aead_alg {
        struct aead_alg aead;
        struct caam_alg_entry caam;
        bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
        u32 *jump_cmd, *uncond_jump_cmd;

        /* DK bit is valid only for AES */
        if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
                append_operation(desc, type | OP_ALG_AS_INITFINAL |
                                 OP_ALG_DECRYPT);
                return;
        }

        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
        set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
                             KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
        struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
        u32 class1_alg_type;
        u32 class2_alg_type;
        u32 alg_op;
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
        unsigned int enckeylen;
        unsigned int split_key_len;
        unsigned int split_key_pad_len;
        unsigned int authsize;
};

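/*
 * Byte layout of ctx->key assumed throughout this file (the trailing nonce
 * exists only for the RFC3686/CTR algorithms; see append_key_aead() below):
 *
 *   0                       split_key_pad_len                 +enckeylen
 *   | MDHA split key (padded) | encryption key | (RFC3686 nonce) |
 */
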
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
                            int keys_fit_inline, bool is_rfc3686)
{
        u32 *nonce;
        unsigned int enckeylen = ctx->enckeylen;

        /*
         * RFC3686 specific:
         * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
         * | enckeylen = encryption key size + nonce size
         */
        if (is_rfc3686)
                enckeylen -= CTR_RFC3686_NONCE_SIZE;

        if (keys_fit_inline) {
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key_as_imm(desc, (void *)ctx->key +
                                  ctx->split_key_pad_len, enckeylen,
                                  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        } else {
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
                           enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        }

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686) {
                nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
                                enckeylen);
                append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
                                   LDST_CLASS_IND_CCB |
                                   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
                append_move(desc,
                            MOVE_SRC_OUTFIFO |
                            MOVE_DEST_CLASS1CTX |
                            (16 << MOVE_OFFSET_SHIFT) |
                            (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
        }
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
                                  int keys_fit_inline, bool is_rfc3686)
{
        u32 *key_jump_cmd;

        /* Note: Context registers are saved. */
        init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
        u32 *desc;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* assoclen + cryptlen = seqinlen */
        append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Prepare to read and write cryptlen + assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        /* aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqoutlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Prepare to read and write cryptlen + assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH2 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /*
         * Insert a NOP here, since we need at least 4 instructions between
         * code patching the descriptor buffer and the location being patched.
         */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
                                                 struct caam_aead_alg, aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline;
        u32 geniv, moveiv;
        u32 ctx1_iv_off = 0;
        u32 *desc;
        const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;

        if (!ctx->authsize)
                return 0;

        /* NULL encryption / decryption */
        if (!ctx->enckeylen)
                return aead_null_set_sh_desc(aead);

        /*
         * AES-CTR needs to load IV in CONTEXT1 reg
         * at an offset of 128bits (16bytes)
         * CONTEXT1[255:128] = IV
         */
        if (ctr_mode)
                ctx1_iv_off = 16;

        /*
         * RFC3686 specific:
         * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         */
        if (is_rfc3686)
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

        if (alg->caam.geniv)
                goto skip_enc;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             FIFOLDST_VLF);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
                                     LDST_SRCDST_BYTE_CONTEXT |
                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                      LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

skip_enc:
        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        if (alg->caam.geniv)
                append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
        else
                append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        if (alg->caam.geniv) {
                append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
                                LDST_SRCDST_BYTE_CONTEXT |
                                (ctx1_iv_off << LDST_OFFSET_SHIFT));
                append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
                            (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
        }

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
                                     LDST_SRCDST_BYTE_CONTEXT |
                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                      LDST_OFFSET_SHIFT));

        /* Choose operation */
        if (ctr_mode)
                append_operation(desc, ctx->class1_alg_type |
                                 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
        else
                append_dec_op1(desc, ctx->class1_alg_type);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        if (!alg->caam.geniv)
                goto skip_givenc;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_givencrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        if (is_rfc3686)
                goto copy_iv;

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_WAITCOMP |
                    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
        /* Copy IV to class 1 context */
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (ivsize << MOVE_LEN_SHIFT));

        /* Return to encryption */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Copy iv from outfifo to class 2 fifo */
        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
                 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
                                     LDST_SRCDST_BYTE_CONTEXT |
                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                      LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* No need to reload iv */
        append_seq_fifo_load(desc, ivsize,
                             FIFOLD_CLASS_SKIP);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
                             FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

skip_givenc:
        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
                            unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *zero_payload_jump_cmd,
            *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * AES GCM encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if they are loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD | JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* if assoclen + cryptlen is ZERO, skip to ICV write */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);

        /* if assoclen is ZERO, skip reading the assoc data */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);

        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* cryptlen = seqinlen - assoclen */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* if cryptlen is ZERO jump to zero-payload commands */
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* jump the zero-payload commands */
        append_jump(desc, JUMP_TEST_ALL | 2);

        /* zero-payload commands */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

        /* There is no input data */
        set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

        /* write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if they are loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD |
                                   JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* if assoclen is ZERO, skip reading the assoc data */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                           JUMP_COND_MATH_Z);

        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        /* cryptlen = seqoutlen - assoclen */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* jump to zero-payload command if cryptlen is zero */
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);

        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* store encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* zero-payload command */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        gcm_set_sh_desc(authenc);

        return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4106 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Skip IV */
        append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

        /* Will read cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* cryptlen = seqoutlen - assoclen */
        append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

        /* Write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Skip IV */
        append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

        /* Will read cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

        /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* Will write cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read encrypted data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* Read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4106_set_sh_desc(authenc);

        return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd;
        u32 *read_move_cmd, *write_move_cmd;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4543 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* assoclen + cryptlen = seqinlen */
        append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Will read assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Will write assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Read and write assoclen + cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        /* Move payload data to OFIFO */
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqoutlen */
        append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Will read assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Will write assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* In-snoop assoclen + cryptlen data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        /* Move payload data to OFIFO */
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4543_set_sh_desc(authenc);

        return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
                              u32 authkeylen)
{
        return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                             ctx->split_key_pad_len, key_in, authkeylen,
                             ctx->alg_op);
}

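/*
 * Worked example (illustrative): for a SHA-1-based authenc algorithm the
 * computation in aead_setkey() below gives split_key_len = 20 * 2 = 40
 * bytes (two MDHA halves) and split_key_pad_len = ALIGN(40, 16) = 48 bytes;
 * the encryption key is then copied right after those 48 bytes in ctx->key.
 */
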
static int aead_setkey(struct crypto_aead *aead,
                       const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        struct crypto_authenc_keys keys;
        int ret = 0;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        /* Pick class 2 key length from algorithm submask */
        ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                      OP_ALG_ALGSEL_SHIFT] * 2;
        ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

        if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
                goto badkey;

#ifdef DEBUG
        printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
               keys.authkeylen + keys.enckeylen, keys.enckeylen,
               keys.authkeylen);
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
        if (ret) {
                goto badkey;
        }

1368 /* append encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001369 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001370
Yuan Kang885e9e22011-07-15 11:21:41 +08001371 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001372 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001373 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001374 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001375 return -ENOMEM;
1376 }
1377#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001378 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001379 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001380 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001381#endif
1382
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001383 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001384
Yuan Kang1acebad2011-07-15 11:21:42 +08001385 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001386 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001387 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001388 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001389 }
1390
1391 return ret;
1392badkey:
1393 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1394 return -EINVAL;
1395}
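/*
 * Worked example (illustrative only): for authenc(hmac(sha256),cbc(aes))
 * the MDHA submask picks 32 out of mdpadlen[], so the split key built
 * from the HMAC ipad/opad states is 32 * 2 = 64 bytes, padded to
 * ALIGN(64, 16) = 64 bytes, and ctx->key ends up laid out as
 *	[ split key, padded | encryption key ]
 * The hypothetical helper below just recomputes those two sizes.
 */
static inline void example_split_key_sizes(u32 alg_op, u32 *split_len,
					   u32 *split_pad_len)
{
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };

	*split_len = mdpadlen[(alg_op & OP_ALG_ALGSEL_SUBMASK) >>
			      OP_ALG_ALGSEL_SHIFT] * 2;
	*split_pad_len = ALIGN(*split_len, 16);
}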
1396
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001397static int gcm_setkey(struct crypto_aead *aead,
1398 const u8 *key, unsigned int keylen)
1399{
1400 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1401 struct device *jrdev = ctx->jrdev;
1402 int ret = 0;
1403
1404#ifdef DEBUG
1405 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1406 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1407#endif
1408
1409 memcpy(ctx->key, key, keylen);
1410 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1411 DMA_TO_DEVICE);
1412 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1413 dev_err(jrdev, "unable to map key i/o memory\n");
1414 return -ENOMEM;
1415 }
1416 ctx->enckeylen = keylen;
1417
1418 ret = gcm_set_sh_desc(aead);
1419 if (ret) {
1420 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1421 DMA_TO_DEVICE);
1422 }
1423
1424 return ret;
1425}
1426
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001427static int rfc4106_setkey(struct crypto_aead *aead,
1428 const u8 *key, unsigned int keylen)
1429{
1430 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1431 struct device *jrdev = ctx->jrdev;
1432 int ret = 0;
1433
1434 if (keylen < 4)
1435 return -EINVAL;
1436
1437#ifdef DEBUG
1438 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1439 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1440#endif
1441
1442 memcpy(ctx->key, key, keylen);
1443
1444 /*
1445 * The last four bytes of the key material are used as the salt value
1446 * in the nonce. Update the AES key length.
1447 */
1448 ctx->enckeylen = keylen - 4;
1449
1450 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1451 DMA_TO_DEVICE);
1452 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1453 dev_err(jrdev, "unable to map key i/o memory\n");
1454 return -ENOMEM;
1455 }
1456
1457 ret = rfc4106_set_sh_desc(aead);
1458 if (ret) {
1459 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1460 DMA_TO_DEVICE);
1461 }
1462
1463 return ret;
1464}
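/*
 * Illustrative sketch: per RFC 4106 the key material passed to ->setkey()
 * is the AES key followed by a 4-byte salt, so
 *	key[0 .. keylen-5]        -> AES key (ctx->enckeylen = keylen - 4)
 *	key[keylen-4 .. keylen-1] -> salt, later fed ahead of the 8-byte IV
 * A hypothetical accessor for the salt bytes:
 */
static inline const u8 *example_rfc4106_salt(const u8 *key,
					     unsigned int keylen)
{
	return key + keylen - 4;	/* last four bytes form the salt */
}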
1465
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001466static int rfc4543_setkey(struct crypto_aead *aead,
1467 const u8 *key, unsigned int keylen)
1468{
1469 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1470 struct device *jrdev = ctx->jrdev;
1471 int ret = 0;
1472
1473 if (keylen < 4)
1474 return -EINVAL;
1475
1476#ifdef DEBUG
1477 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1478 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1479#endif
1480
1481 memcpy(ctx->key, key, keylen);
1482
1483 /*
1484 * The last four bytes of the key material are used as the salt value
1485 * in the nonce. Update the AES key length.
1486 */
1487 ctx->enckeylen = keylen - 4;
1488
1489 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1490 DMA_TO_DEVICE);
1491 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1492 dev_err(jrdev, "unable to map key i/o memory\n");
1493 return -ENOMEM;
1494 }
1495
1496 ret = rfc4543_set_sh_desc(aead);
1497 if (ret) {
1498 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1499 DMA_TO_DEVICE);
1500 }
1501
1502 return ret;
1503}
1504
Yuan Kangacdca312011-07-15 11:21:42 +08001505static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1506 const u8 *key, unsigned int keylen)
1507{
1508 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001509 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1510 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1511 const char *alg_name = crypto_tfm_alg_name(tfm);
Yuan Kangacdca312011-07-15 11:21:42 +08001512 struct device *jrdev = ctx->jrdev;
1513 int ret = 0;
Horia Geanta4464a7d2014-03-14 17:46:49 +02001514 u32 *key_jump_cmd;
Yuan Kangacdca312011-07-15 11:21:42 +08001515 u32 *desc;
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001516 u8 *nonce;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001517 u32 geniv;
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001518 u32 ctx1_iv_off = 0;
1519 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1520 OP_ALG_AAI_CTR_MOD128);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001521 const bool is_rfc3686 = (ctr_mode &&
1522 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kangacdca312011-07-15 11:21:42 +08001523
1524#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001525 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001526 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1527#endif
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001528 /*
1529 * AES-CTR needs to load IV in CONTEXT1 reg
1530 * at an offset of 128bits (16bytes)
1531 * CONTEXT1[255:128] = IV
1532 */
1533 if (ctr_mode)
1534 ctx1_iv_off = 16;
Yuan Kangacdca312011-07-15 11:21:42 +08001535
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001536 /*
1537 * RFC3686 specific:
1538 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1539 * | *key = {KEY, NONCE}
1540 */
1541 if (is_rfc3686) {
1542 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1543 keylen -= CTR_RFC3686_NONCE_SIZE;
1544 }
1545
Yuan Kangacdca312011-07-15 11:21:42 +08001546 memcpy(ctx->key, key, keylen);
1547 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1548 DMA_TO_DEVICE);
1549 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1550 dev_err(jrdev, "unable to map key i/o memory\n");
1551 return -ENOMEM;
1552 }
1553 ctx->enckeylen = keylen;
1554
1555 /* ablkcipher_encrypt shared descriptor */
1556 desc = ctx->sh_desc_enc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001557 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001558 /* Skip if already shared */
1559 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1560 JUMP_COND_SHRD);
1561
1562 /* Load class1 key only */
1563 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1564 ctx->enckeylen, CLASS_1 |
1565 KEY_DEST_CLASS_REG);
1566
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001567 /* Load nonce into CONTEXT1 reg */
1568 if (is_rfc3686) {
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001569 nonce = (u8 *)key + keylen;
1570 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1571 LDST_CLASS_IND_CCB |
1572 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001573 append_move(desc, MOVE_WAITCOMP |
1574 MOVE_SRC_OUTFIFO |
1575 MOVE_DEST_CLASS1CTX |
1576 (16 << MOVE_OFFSET_SHIFT) |
1577 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1578 }
1579
Yuan Kangacdca312011-07-15 11:21:42 +08001580 set_jump_tgt_here(desc, key_jump_cmd);
1581
Yuan Kangacdca312011-07-15 11:21:42 +08001582 /* Load iv */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001583 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001584 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001585
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001586 /* Load counter into CONTEXT1 reg */
1587 if (is_rfc3686)
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001588 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1589 LDST_SRCDST_BYTE_CONTEXT |
1590 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1591 LDST_OFFSET_SHIFT));
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001592
Yuan Kangacdca312011-07-15 11:21:42 +08001593 /* Load operation */
1594 append_operation(desc, ctx->class1_alg_type |
1595 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1596
1597 /* Perform operation */
1598 ablkcipher_append_src_dst(desc);
1599
1600 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1601 desc_bytes(desc),
1602 DMA_TO_DEVICE);
1603 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1604 dev_err(jrdev, "unable to map shared descriptor\n");
1605 return -ENOMEM;
1606 }
1607#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001608 print_hex_dump(KERN_ERR,
1609 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001610 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1611 desc_bytes(desc), 1);
1612#endif
1613 /* ablkcipher_decrypt shared descriptor */
1614 desc = ctx->sh_desc_dec;
1615
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001616 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001617 /* Skip if already shared */
1618 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1619 JUMP_COND_SHRD);
1620
1621 /* Load class1 key only */
1622 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1623 ctx->enckeylen, CLASS_1 |
1624 KEY_DEST_CLASS_REG);
1625
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001626 /* Load nonce into CONTEXT1 reg */
1627 if (is_rfc3686) {
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001628 nonce = (u8 *)key + keylen;
1629 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1630 LDST_CLASS_IND_CCB |
1631 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001632 append_move(desc, MOVE_WAITCOMP |
1633 MOVE_SRC_OUTFIFO |
1634 MOVE_DEST_CLASS1CTX |
1635 (16 << MOVE_OFFSET_SHIFT) |
1636 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1637 }
1638
Yuan Kangacdca312011-07-15 11:21:42 +08001639 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kangacdca312011-07-15 11:21:42 +08001640
1641 /* load IV */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001642 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001643 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001644
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001645 /* Load counter into CONTEXT1 reg */
1646 if (is_rfc3686)
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001647 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1648 LDST_SRCDST_BYTE_CONTEXT |
1649 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1650 LDST_OFFSET_SHIFT));
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001651
Yuan Kangacdca312011-07-15 11:21:42 +08001652 /* Choose operation */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001653 if (ctr_mode)
1654 append_operation(desc, ctx->class1_alg_type |
1655 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1656 else
1657 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kangacdca312011-07-15 11:21:42 +08001658
1659 /* Perform operation */
1660 ablkcipher_append_src_dst(desc);
1661
Yuan Kangacdca312011-07-15 11:21:42 +08001662 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1663 desc_bytes(desc),
1664 DMA_TO_DEVICE);
Horia Geanta71c65f72014-07-11 15:34:48 +03001665 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
Yuan Kangacdca312011-07-15 11:21:42 +08001666 dev_err(jrdev, "unable to map shared descriptor\n");
1667 return -ENOMEM;
1668 }
1669
1670#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001671 print_hex_dump(KERN_ERR,
1672 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001673 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1674 desc_bytes(desc), 1);
1675#endif
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001676 /* ablkcipher_givencrypt shared descriptor */
1677 desc = ctx->sh_desc_givenc;
1678
1679 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1680 /* Skip if already shared */
1681 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1682 JUMP_COND_SHRD);
1683
1684 /* Load class1 key only */
1685 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1686 ctx->enckeylen, CLASS_1 |
1687 KEY_DEST_CLASS_REG);
1688
1689 /* Load Nonce into CONTEXT1 reg */
1690 if (is_rfc3686) {
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001691 nonce = (u8 *)key + keylen;
1692 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1693 LDST_CLASS_IND_CCB |
1694 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001695 append_move(desc, MOVE_WAITCOMP |
1696 MOVE_SRC_OUTFIFO |
1697 MOVE_DEST_CLASS1CTX |
1698 (16 << MOVE_OFFSET_SHIFT) |
1699 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1700 }
1701 set_jump_tgt_here(desc, key_jump_cmd);
1702
1703 /* Generate IV */
1704 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1705 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1706 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1707 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1708 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1709 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1710 append_move(desc, MOVE_WAITCOMP |
1711 MOVE_SRC_INFIFO |
1712 MOVE_DEST_CLASS1CTX |
1713 (crt->ivsize << MOVE_LEN_SHIFT) |
1714 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1715 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1716
1717 /* Copy generated IV to memory */
1718 append_seq_store(desc, crt->ivsize,
1719 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1720 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1721
1722 /* Load Counter into CONTEXT1 reg */
1723 if (is_rfc3686)
Catalin Vasile5ba1c7b2016-08-31 15:57:55 +03001724 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1725 LDST_SRCDST_BYTE_CONTEXT |
1726 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1727 LDST_OFFSET_SHIFT));
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001728
1729 if (ctx1_iv_off)
1730 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1731 (1 << JUMP_OFFSET_SHIFT));
1732
1733 /* Load operation */
1734 append_operation(desc, ctx->class1_alg_type |
1735 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1736
1737 /* Perform operation */
1738 ablkcipher_append_src_dst(desc);
1739
1740 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1741 desc_bytes(desc),
1742 DMA_TO_DEVICE);
1743 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1744 dev_err(jrdev, "unable to map shared descriptor\n");
1745 return -ENOMEM;
1746 }
1747#ifdef DEBUG
1748 print_hex_dump(KERN_ERR,
1749 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1750 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1751 desc_bytes(desc), 1);
1752#endif
Yuan Kangacdca312011-07-15 11:21:42 +08001753
1754 return ret;
1755}
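/*
 * Illustrative sketch: CONTEXT1 layout assumed by the descriptors above.
 * Plain AES-CTR keeps the IV in CONTEXT1[255:128], i.e. at byte offset
 * 16; for rfc3686 that same 16-byte window holds
 *	offset 16: 4-byte nonce   (CTR_RFC3686_NONCE_SIZE)
 *	offset 20: 8-byte IV      (hence ctx1_iv_off = 16 + 4)
 *	offset 28: 4-byte counter, seeded with big-endian 1
 * A hypothetical helper recomputing the IV offset:
 */
static inline u32 example_ctx1_iv_off(bool ctr_mode, bool is_rfc3686)
{
	if (is_rfc3686)
		return 16 + CTR_RFC3686_NONCE_SIZE;

	return ctr_mode ? 16 : 0;
}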
1756
Catalin Vasilec6415a62015-10-02 13:13:18 +03001757static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1758 const u8 *key, unsigned int keylen)
1759{
1760 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1761 struct device *jrdev = ctx->jrdev;
1762 u32 *key_jump_cmd, *desc;
1763 __be64 sector_size = cpu_to_be64(512);
1764
1765 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1766 crypto_ablkcipher_set_flags(ablkcipher,
1767 CRYPTO_TFM_RES_BAD_KEY_LEN);
1768 dev_err(jrdev, "key size mismatch\n");
1769 return -EINVAL;
1770 }
1771
1772 memcpy(ctx->key, key, keylen);
1773 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1774 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1775 dev_err(jrdev, "unable to map key i/o memory\n");
1776 return -ENOMEM;
1777 }
1778 ctx->enckeylen = keylen;
1779
1780 /* xts_ablkcipher_encrypt shared descriptor */
1781 desc = ctx->sh_desc_enc;
1782 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1783 /* Skip if already shared */
1784 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1785 JUMP_COND_SHRD);
1786
1787 /* Load class1 keys only */
1788 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1789 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1790
1791 /* Load sector size with index 40 bytes (0x28) */
1792 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1793 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1794 append_data(desc, (void *)&sector_size, 8);
1795
1796 set_jump_tgt_here(desc, key_jump_cmd);
1797
1798 /*
1799 * create sequence for loading the sector index
1800 * Upper 8B of IV - will be used as sector index
1801 * Lower 8B of IV - will be discarded
1802 */
1803 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1804 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1805 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1806
1807 /* Load operation */
1808 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1809 OP_ALG_ENCRYPT);
1810
1811 /* Perform operation */
1812 ablkcipher_append_src_dst(desc);
1813
1814 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1815 DMA_TO_DEVICE);
1816 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1817 dev_err(jrdev, "unable to map shared descriptor\n");
1818 return -ENOMEM;
1819 }
1820#ifdef DEBUG
1821 print_hex_dump(KERN_ERR,
1822 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1823 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1824#endif
1825
1826 /* xts_ablkcipher_decrypt shared descriptor */
1827 desc = ctx->sh_desc_dec;
1828
1829 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1830 /* Skip if already shared */
1831 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1832 JUMP_COND_SHRD);
1833
1834 /* Load class1 key only */
1835 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1836 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1837
1838 /* Load sector size with index 40 bytes (0x28) */
1839 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1840 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1841 append_data(desc, (void *)&sector_size, 8);
1842
1843 set_jump_tgt_here(desc, key_jump_cmd);
1844
1845 /*
1846 * create sequence for loading the sector index
1847 * Upper 8B of IV - will be used as sector index
1848 * Lower 8B of IV - will be discarded
1849 */
1850 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1851 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1852 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1853
1854 /* Load operation */
1855 append_dec_op1(desc, ctx->class1_alg_type);
1856
1857 /* Perform operation */
1858 ablkcipher_append_src_dst(desc);
1859
1860 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1861 DMA_TO_DEVICE);
1862 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1863 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1864 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1865 dev_err(jrdev, "unable to map shared descriptor\n");
1866 return -ENOMEM;
1867 }
1868#ifdef DEBUG
1869 print_hex_dump(KERN_ERR,
1870 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1871 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1872#endif
1873
1874 return 0;
1875}
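/*
 * Illustrative sketch: the XTS descriptors above consume a 16-byte IV
 * whose upper 8 bytes carry the sector index (loaded into the class 1
 * context at offset 0x20) while the lower 8 bytes are skipped; the fixed
 * sector size (512, big endian) is embedded at context offset 0x28.
 * Hypothetical IV construction by a caller; the endianness of the index
 * is an assumption here and follows whatever IV generator is in use:
 */
static inline void example_xts_fill_iv(u8 iv[16], __be64 sector_index)
{
	memcpy(iv, &sector_index, 8);	/* upper 8B: consumed as index */
	memset(iv + 8, 0, 8);		/* lower 8B: skipped by the desc */
}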
1876
Kim Phillips8e8ec592011-03-13 16:54:26 +08001877/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001878 * aead_edesc - s/w-extended aead descriptor
1879 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
Kim Phillips8e8ec592011-03-13 16:54:26 +08001880 * @src_nents: number of segments in input scatterlist
1881 * @dst_nents: number of segments in output scatterlist
Yuan Kang1acebad2011-07-15 11:21:42 +08001882 * @iv_dma: dma address of iv for checking continuity and link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001883 * @sec4_sg: pointer to the h/w link table within the edesc
Yuan Kanga299c832012-06-22 19:48:46 -05001884 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1885 * @sec4_sg_dma: bus physical mapped address of h/w link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001886 * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1887 */
Yuan Kang0e479302011-07-15 11:21:41 +08001888struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001889 int assoc_nents;
1890 int src_nents;
1891 int dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001892 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001893 int sec4_sg_bytes;
1894 dma_addr_t sec4_sg_dma;
1895 struct sec4_sg_entry *sec4_sg;
Herbert Xuf2147b82015-06-16 13:54:23 +08001896 u32 hw_desc[];
Kim Phillips8e8ec592011-03-13 16:54:26 +08001897};
1898
Yuan Kangacdca312011-07-15 11:21:42 +08001899/*
1900 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1901 * @src_nents: number of segments in input scatterlist
1902 * @dst_nents: number of segments in output scatterlist
1903 * @iv_dma: dma address of iv for checking continuity and link table
1904 * @sec4_sg: pointer to the h/w link table within the edesc
Yuan Kanga299c832012-06-22 19:48:46 -05001905 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1906 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +08001907 * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1908 */
1909struct ablkcipher_edesc {
1910 int src_nents;
1911 int dst_nents;
1912 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001913 int sec4_sg_bytes;
1914 dma_addr_t sec4_sg_dma;
1915 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001916 u32 hw_desc[0];
1917};
1918
Yuan Kang1acebad2011-07-15 11:21:42 +08001919static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -05001920 struct scatterlist *dst, int src_nents,
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001921 int dst_nents,
Yuan Kanga299c832012-06-22 19:48:46 -05001922 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1923 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001924{
Yuan Kang643b39b2012-06-22 19:48:49 -05001925 if (dst != src) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001926 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1927 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001928 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001929 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001930 }
1931
Yuan Kang1acebad2011-07-15 11:21:42 +08001932 if (iv_dma)
1933 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -05001934 if (sec4_sg_bytes)
1935 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001936 DMA_TO_DEVICE);
1937}
1938
Yuan Kang1acebad2011-07-15 11:21:42 +08001939static void aead_unmap(struct device *dev,
1940 struct aead_edesc *edesc,
1941 struct aead_request *req)
1942{
Herbert Xuf2147b82015-06-16 13:54:23 +08001943 caam_unmap(dev, req->src, req->dst,
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001944 edesc->src_nents, edesc->dst_nents, 0, 0,
Herbert Xuf2147b82015-06-16 13:54:23 +08001945 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1946}
1947
Yuan Kangacdca312011-07-15 11:21:42 +08001948static void ablkcipher_unmap(struct device *dev,
1949 struct ablkcipher_edesc *edesc,
1950 struct ablkcipher_request *req)
1951{
1952 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1953 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1954
1955 caam_unmap(dev, req->src, req->dst,
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001956 edesc->src_nents, edesc->dst_nents,
1957 edesc->iv_dma, ivsize,
Yuan Kang643b39b2012-06-22 19:48:49 -05001958 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +08001959}
1960
Yuan Kang0e479302011-07-15 11:21:41 +08001961static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001962 void *context)
1963{
Yuan Kang0e479302011-07-15 11:21:41 +08001964 struct aead_request *req = context;
1965 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001966
1967#ifdef DEBUG
1968 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1969#endif
1970
1971 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1972
1973 if (err)
1974 caam_jr_strstatus(jrdev, err);
1975
1976 aead_unmap(jrdev, edesc, req);
1977
1978 kfree(edesc);
1979
1980 aead_request_complete(req, err);
1981}
1982
Yuan Kang0e479302011-07-15 11:21:41 +08001983static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001984 void *context)
1985{
Yuan Kang0e479302011-07-15 11:21:41 +08001986 struct aead_request *req = context;
1987 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001988
1989#ifdef DEBUG
1990 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1991#endif
1992
1993 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1994
1995 if (err)
1996 caam_jr_strstatus(jrdev, err);
1997
1998 aead_unmap(jrdev, edesc, req);
1999
2000 /*
2001 * Verify the h/w ICV check passed; if it failed, report -EBADMSG.
2002 */
2003 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2004 err = -EBADMSG;
2005
2006 kfree(edesc);
2007
2008 aead_request_complete(req, err);
2009}
2010
Yuan Kangacdca312011-07-15 11:21:42 +08002011static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2012 void *context)
2013{
2014 struct ablkcipher_request *req = context;
2015 struct ablkcipher_edesc *edesc;
2016#ifdef DEBUG
2017 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2018 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2019
2020 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2021#endif
2022
2023 edesc = (struct ablkcipher_edesc *)((char *)desc -
2024 offsetof(struct ablkcipher_edesc, hw_desc));
2025
Marek Vasutfa9659c2014-04-24 20:05:12 +02002026 if (err)
2027 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002028
2029#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002030 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002031 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2032 edesc->src_nents > 1 ? 100 : ivsize, 1);
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002033 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2034 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
Horia Geantă00fef2b2016-11-09 10:46:16 +02002035 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002036#endif
2037
2038 ablkcipher_unmap(jrdev, edesc, req);
2039 kfree(edesc);
2040
2041 ablkcipher_request_complete(req, err);
2042}
2043
2044static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2045 void *context)
2046{
2047 struct ablkcipher_request *req = context;
2048 struct ablkcipher_edesc *edesc;
2049#ifdef DEBUG
2050 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2051 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2052
2053 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2054#endif
2055
2056 edesc = (struct ablkcipher_edesc *)((char *)desc -
2057 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02002058 if (err)
2059 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002060
2061#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002062 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002063 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2064 ivsize, 1);
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002065 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2066 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
Horia Geantă00fef2b2016-11-09 10:46:16 +02002067 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002068#endif
2069
2070 ablkcipher_unmap(jrdev, edesc, req);
2071 kfree(edesc);
2072
2073 ablkcipher_request_complete(req, err);
2074}
2075
Kim Phillips8e8ec592011-03-13 16:54:26 +08002076/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002077 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002078 */
Herbert Xuf2147b82015-06-16 13:54:23 +08002079static void init_aead_job(struct aead_request *req,
2080 struct aead_edesc *edesc,
2081 bool all_contig, bool encrypt)
2082{
2083 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2084 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2085 int authsize = ctx->authsize;
2086 u32 *desc = edesc->hw_desc;
2087 u32 out_options, in_options;
2088 dma_addr_t dst_dma, src_dma;
2089 int len, sec4_sg_index = 0;
2090 dma_addr_t ptr;
2091 u32 *sh_desc;
2092
2093 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2094 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2095
2096 len = desc_len(sh_desc);
2097 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2098
2099 if (all_contig) {
2100 src_dma = sg_dma_address(req->src);
2101 in_options = 0;
2102 } else {
2103 src_dma = edesc->sec4_sg_dma;
2104 sec4_sg_index += edesc->src_nents;
2105 in_options = LDST_SGF;
2106 }
2107
2108 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2109 in_options);
2110
2111 dst_dma = src_dma;
2112 out_options = in_options;
2113
2114 if (unlikely(req->src != req->dst)) {
2115 if (!edesc->dst_nents) {
2116 dst_dma = sg_dma_address(req->dst);
2117 } else {
2118 dst_dma = edesc->sec4_sg_dma +
2119 sec4_sg_index *
2120 sizeof(struct sec4_sg_entry);
2121 out_options = LDST_SGF;
2122 }
2123 }
2124
2125 if (encrypt)
2126 append_seq_out_ptr(desc, dst_dma,
2127 req->assoclen + req->cryptlen + authsize,
2128 out_options);
2129 else
2130 append_seq_out_ptr(desc, dst_dma,
2131 req->assoclen + req->cryptlen - authsize,
2132 out_options);
2133
2134 /* REG3 = assoclen */
2135 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2136}
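/*
 * Illustrative note: the sequence lengths programmed above are
 *	seq_in  = assoclen + cryptlen
 *	seq_out = assoclen + cryptlen + authsize   (encrypt: ICV appended)
 *	seq_out = assoclen + cryptlen - authsize   (decrypt: ICV consumed)
 * A hypothetical helper for the output length:
 */
static inline unsigned int example_aead_out_len(unsigned int assoclen,
						unsigned int cryptlen,
						unsigned int authsize,
						bool encrypt)
{
	if (encrypt)
		return assoclen + cryptlen + authsize;

	return assoclen + cryptlen - authsize;
}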
2137
2138static void init_gcm_job(struct aead_request *req,
2139 struct aead_edesc *edesc,
2140 bool all_contig, bool encrypt)
2141{
2142 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2143 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2144 unsigned int ivsize = crypto_aead_ivsize(aead);
2145 u32 *desc = edesc->hw_desc;
2146 bool generic_gcm = (ivsize == 12);
2147 unsigned int last;
2148
2149 init_aead_job(req, edesc, all_contig, encrypt);
2150
2151 /* BUG: this should not be specific to generic GCM. */
2152 last = 0;
2153 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2154 last = FIFOLD_TYPE_LAST1;
2155
2156 /* Read GCM IV */
2157 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2158 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2159 /* Append Salt */
2160 if (!generic_gcm)
2161 append_data(desc, ctx->key + ctx->enckeylen, 4);
2162 /* Append IV */
2163 append_data(desc, req->iv, ivsize);
2164 /* End of blank commands */
2165}
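/*
 * Illustrative sketch: the FIFO LOAD above always presents 12 bytes of
 * IV material to the CAAM. Generic gcm(aes) supplies the caller's
 * 12-byte IV directly; rfc4106/rfc4543 build it from the 4-byte salt
 * (stored right after the AES key in ctx->key) plus the 8-byte
 * per-request IV. A hypothetical helper assembling those 12 bytes:
 */
static inline void example_gcm_hw_iv(u8 hw_iv[12], const u8 *salt,
				     const u8 *iv, bool generic_gcm)
{
	if (generic_gcm) {
		memcpy(hw_iv, iv, 12);		/* caller's full IV */
	} else {
		memcpy(hw_iv, salt, 4);		/* salt from setkey */
		memcpy(hw_iv + 4, iv, 8);	/* per-request IV */
	}
}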
2166
Herbert Xu479bcc72015-07-30 17:53:17 +08002167static void init_authenc_job(struct aead_request *req,
2168 struct aead_edesc *edesc,
2169 bool all_contig, bool encrypt)
Yuan Kang1acebad2011-07-15 11:21:42 +08002170{
2171 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Herbert Xu479bcc72015-07-30 17:53:17 +08002172 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2173 struct caam_aead_alg, aead);
2174 unsigned int ivsize = crypto_aead_ivsize(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08002175 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Herbert Xu479bcc72015-07-30 17:53:17 +08002176 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2177 OP_ALG_AAI_CTR_MOD128);
2178 const bool is_rfc3686 = alg->caam.rfc3686;
Yuan Kang1acebad2011-07-15 11:21:42 +08002179 u32 *desc = edesc->hw_desc;
Herbert Xu479bcc72015-07-30 17:53:17 +08002180 u32 ivoffset = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002181
Herbert Xu479bcc72015-07-30 17:53:17 +08002182 /*
2183 * AES-CTR needs to load IV in CONTEXT1 reg
2184 * at an offset of 128bits (16bytes)
2185 * CONTEXT1[255:128] = IV
2186 */
2187 if (ctr_mode)
2188 ivoffset = 16;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002189
Herbert Xu479bcc72015-07-30 17:53:17 +08002190 /*
2191 * RFC3686 specific:
2192 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2193 */
2194 if (is_rfc3686)
2195 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002196
Herbert Xu479bcc72015-07-30 17:53:17 +08002197 init_aead_job(req, edesc, all_contig, encrypt);
Yuan Kang1acebad2011-07-15 11:21:42 +08002198
Horia Geantă8b18e232016-08-29 14:52:14 +03002199 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
Herbert Xu479bcc72015-07-30 17:53:17 +08002200 append_load_as_imm(desc, req->iv, ivsize,
2201 LDST_CLASS_1_CCB |
2202 LDST_SRCDST_BYTE_CONTEXT |
2203 (ivoffset << LDST_OFFSET_SHIFT));
Kim Phillips8e8ec592011-03-13 16:54:26 +08002204}
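/*
 * Illustrative note: the conditional load above writes the IV into
 * CONTEXT1 only when the shared descriptor does not produce it itself:
 * always for non-geniv algorithms, and additionally for rfc3686
 * encryption, where the counter block must be re-seeded on every
 * request even when the descriptor owns IV generation.
 */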
2205
2206/*
Yuan Kangacdca312011-07-15 11:21:42 +08002207 * Fill in ablkcipher job descriptor
2208 */
2209static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2210 struct ablkcipher_edesc *edesc,
2211 struct ablkcipher_request *req,
2212 bool iv_contig)
2213{
2214 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2215 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2216 u32 *desc = edesc->hw_desc;
2217 u32 out_options = 0, in_options;
2218 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002219 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002220
2221#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002222 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002223 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2224 ivsize, 1);
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002225 printk(KERN_ERR "asked=%d, nbytes=%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
2226 dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
2227 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
Horia Geantă00fef2b2016-11-09 10:46:16 +02002228 edesc->src_nents ? 100 : req->nbytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002229#endif
2230
2231 len = desc_len(sh_desc);
2232 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2233
2234 if (iv_contig) {
2235 src_dma = edesc->iv_dma;
2236 in_options = 0;
2237 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002238 src_dma = edesc->sec4_sg_dma;
Cristian Stoica35b82e52015-01-21 11:53:30 +02002239 sec4_sg_index += edesc->src_nents + 1;
Yuan Kangacdca312011-07-15 11:21:42 +08002240 in_options = LDST_SGF;
2241 }
2242 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2243
2244 if (likely(req->src == req->dst)) {
2245 if (!edesc->src_nents && iv_contig) {
2246 dst_dma = sg_dma_address(req->src);
2247 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002248 dst_dma = edesc->sec4_sg_dma +
2249 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002250 out_options = LDST_SGF;
2251 }
2252 } else {
2253 if (!edesc->dst_nents) {
2254 dst_dma = sg_dma_address(req->dst);
2255 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002256 dst_dma = edesc->sec4_sg_dma +
2257 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002258 out_options = LDST_SGF;
2259 }
2260 }
2261 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2262}
2263
2264/*
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002265 * Fill in ablkcipher givencrypt job descriptor
2266 */
2267static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2268 struct ablkcipher_edesc *edesc,
2269 struct ablkcipher_request *req,
2270 bool iv_contig)
2271{
2272 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2273 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2274 u32 *desc = edesc->hw_desc;
2275 u32 out_options, in_options;
2276 dma_addr_t dst_dma, src_dma;
2277 int len, sec4_sg_index = 0;
2278
2279#ifdef DEBUG
2280 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2281 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2282 ivsize, 1);
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002283 dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2284 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
Horia Geantă00fef2b2016-11-09 10:46:16 +02002285 edesc->src_nents ? 100 : req->nbytes, 1);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002286#endif
2287
2288 len = desc_len(sh_desc);
2289 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2290
2291 if (!edesc->src_nents) {
2292 src_dma = sg_dma_address(req->src);
2293 in_options = 0;
2294 } else {
2295 src_dma = edesc->sec4_sg_dma;
2296 sec4_sg_index += edesc->src_nents;
2297 in_options = LDST_SGF;
2298 }
2299 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2300
2301 if (iv_contig) {
2302 dst_dma = edesc->iv_dma;
2303 out_options = 0;
2304 } else {
2305 dst_dma = edesc->sec4_sg_dma +
2306 sec4_sg_index * sizeof(struct sec4_sg_entry);
2307 out_options = LDST_SGF;
2308 }
2309 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2310}
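/*
 * Illustrative note: the two job-descriptor builders above differ only
 * in where the IV travels:
 *	init_ablkcipher_job:     seq_in = IV | payload, seq_out = payload
 *	init_ablkcipher_giv_job: seq_in = payload,      seq_out = IV | payload
 * i.e. givencrypt emits the freshly generated IV at the front of the
 * output sequence so the caller can retrieve it.
 */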
2311
2312/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002313 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002314 */
Herbert Xuf2147b82015-06-16 13:54:23 +08002315static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2316 int desc_bytes, bool *all_contig_ptr,
2317 bool encrypt)
2318{
2319 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2320 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2321 struct device *jrdev = ctx->jrdev;
2322 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2323 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2324 int src_nents, dst_nents = 0;
2325 struct aead_edesc *edesc;
2326 int sgc;
2327 bool all_contig = true;
Herbert Xuf2147b82015-06-16 13:54:23 +08002328 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2329 unsigned int authsize = ctx->authsize;
2330
2331 if (unlikely(req->dst != req->src)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002332 src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
Herbert Xuf2147b82015-06-16 13:54:23 +08002333 dst_nents = sg_count(req->dst,
2334 req->assoclen + req->cryptlen +
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002335 (encrypt ? authsize : (-authsize)));
Herbert Xuf2147b82015-06-16 13:54:23 +08002336 } else {
2337 src_nents = sg_count(req->src,
2338 req->assoclen + req->cryptlen +
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002339 (encrypt ? authsize : 0));
Herbert Xuf2147b82015-06-16 13:54:23 +08002340 }
2341
2342 /* Check if data are contiguous. */
2343 all_contig = !src_nents;
Horia Geantăc530e342016-11-09 10:46:15 +02002344 if (!all_contig)
Herbert Xuf2147b82015-06-16 13:54:23 +08002345 sec4_sg_len = src_nents;
Herbert Xuf2147b82015-06-16 13:54:23 +08002346
2347 sec4_sg_len += dst_nents;
2348
2349 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2350
2351 /* allocate space for base edesc and hw desc commands, link tables */
Victoria Milhoandde20ae2015-08-05 11:28:39 -07002352 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2353 GFP_DMA | flags);
Herbert Xuf2147b82015-06-16 13:54:23 +08002354 if (!edesc) {
2355 dev_err(jrdev, "could not allocate extended descriptor\n");
2356 return ERR_PTR(-ENOMEM);
2357 }
2358
2359 if (likely(req->src == req->dst)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002360 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2361 DMA_BIDIRECTIONAL);
Herbert Xuf2147b82015-06-16 13:54:23 +08002362 if (unlikely(!sgc)) {
2363 dev_err(jrdev, "unable to map source\n");
2364 kfree(edesc);
2365 return ERR_PTR(-ENOMEM);
2366 }
2367 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002368 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2369 DMA_TO_DEVICE);
Herbert Xuf2147b82015-06-16 13:54:23 +08002370 if (unlikely(!sgc)) {
2371 dev_err(jrdev, "unable to map source\n");
2372 kfree(edesc);
2373 return ERR_PTR(-ENOMEM);
2374 }
2375
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002376 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2377 DMA_FROM_DEVICE);
Herbert Xuf2147b82015-06-16 13:54:23 +08002378 if (unlikely(!sgc)) {
2379 dev_err(jrdev, "unable to map destination\n");
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002380 dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2381 DMA_TO_DEVICE);
Herbert Xuf2147b82015-06-16 13:54:23 +08002382 kfree(edesc);
2383 return ERR_PTR(-ENOMEM);
2384 }
2385 }
2386
2387 edesc->src_nents = src_nents;
Herbert Xuf2147b82015-06-16 13:54:23 +08002388 edesc->dst_nents = dst_nents;
Herbert Xuf2147b82015-06-16 13:54:23 +08002389 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2390 desc_bytes;
2391 *all_contig_ptr = all_contig;
2392
2393 sec4_sg_index = 0;
2394 if (!all_contig) {
Herbert Xu7793bda2015-06-18 14:25:56 +08002395 sg_to_sec4_sg_last(req->src, src_nents,
Herbert Xuf2147b82015-06-16 13:54:23 +08002396 edesc->sec4_sg + sec4_sg_index, 0);
2397 sec4_sg_index += src_nents;
2398 }
2399 if (dst_nents) {
2400 sg_to_sec4_sg_last(req->dst, dst_nents,
2401 edesc->sec4_sg + sec4_sg_index, 0);
2402 }
2403
2404 if (!sec4_sg_bytes)
2405 return edesc;
2406
2407 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2408 sec4_sg_bytes, DMA_TO_DEVICE);
2409 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2410 dev_err(jrdev, "unable to map S/G table\n");
2411 aead_unmap(jrdev, edesc, req);
2412 kfree(edesc);
2413 return ERR_PTR(-ENOMEM);
2414 }
2415
2416 edesc->sec4_sg_bytes = sec4_sg_bytes;
2417
2418 return edesc;
2419}
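/*
 * Illustrative note: one kzalloc above carves out
 *	[ struct aead_edesc | h/w job descriptor | sec4_sg entries ]
 * which is why edesc->sec4_sg points sizeof(*edesc) + desc_bytes past
 * the base. A hypothetical recomputation of the total size:
 */
static inline size_t example_aead_edesc_size(int desc_bytes,
					     int sec4_sg_len)
{
	return sizeof(struct aead_edesc) + desc_bytes +
	       sec4_sg_len * sizeof(struct sec4_sg_entry);
}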
2420
2421static int gcm_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002422{
Yuan Kang0e479302011-07-15 11:21:41 +08002423 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002424 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002425 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2426 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002427 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002428 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002429 int ret = 0;
2430
Kim Phillips8e8ec592011-03-13 16:54:26 +08002431 /* allocate extended descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002432 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002433 if (IS_ERR(edesc))
2434 return PTR_ERR(edesc);
2435
Yuan Kang1acebad2011-07-15 11:21:42 +08002436 /* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002437 init_gcm_job(req, edesc, all_contig, true);
Yuan Kang1acebad2011-07-15 11:21:42 +08002438#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002439 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002440 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2441 desc_bytes(edesc->hw_desc), 1);
2442#endif
2443
Kim Phillips8e8ec592011-03-13 16:54:26 +08002444 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002445 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2446 if (!ret) {
2447 ret = -EINPROGRESS;
2448 } else {
2449 aead_unmap(jrdev, edesc, req);
2450 kfree(edesc);
2451 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002452
Yuan Kang1acebad2011-07-15 11:21:42 +08002453 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002454}
2455
Herbert Xu46218752015-07-09 07:17:33 +08002456static int ipsec_gcm_encrypt(struct aead_request *req)
2457{
2458 if (req->assoclen < 8)
2459 return -EINVAL;
2460
2461 return gcm_encrypt(req);
2462}
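/*
 * Illustrative caller sketch (not driver code): the encrypt entry points
 * above are reached through the kernel's generic AEAD API, roughly as
 * follows; error handling is omitted and the transform name is an
 * assumption:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
 *	aead_request_set_ad(req, assoclen);
 *	err = crypto_aead_encrypt(req);	/* -EINPROGRESS when queued */
 *
 * Completion is then delivered through the request callback, mirroring
 * aead_encrypt_done() above.
 */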
2463
Herbert Xu479bcc72015-07-30 17:53:17 +08002464static int aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002465{
Yuan Kang1acebad2011-07-15 11:21:42 +08002466 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002467 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08002468 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2469 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002470 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08002471 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002472 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002473
2474 /* allocate extended descriptor */
Herbert Xu479bcc72015-07-30 17:53:17 +08002475 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2476 &all_contig, true);
Yuan Kang0e479302011-07-15 11:21:41 +08002477 if (IS_ERR(edesc))
2478 return PTR_ERR(edesc);
2479
Herbert Xuf2147b82015-06-16 13:54:23 +08002480 /* Create and submit job descriptor */
Herbert Xu479bcc72015-07-30 17:53:17 +08002481 init_authenc_job(req, edesc, all_contig, true);
Yuan Kang1acebad2011-07-15 11:21:42 +08002482#ifdef DEBUG
Herbert Xuf2147b82015-06-16 13:54:23 +08002483 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2484 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2485 desc_bytes(edesc->hw_desc), 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002486#endif
2487
Herbert Xuf2147b82015-06-16 13:54:23 +08002488 desc = edesc->hw_desc;
Herbert Xu479bcc72015-07-30 17:53:17 +08002489 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
Herbert Xuf2147b82015-06-16 13:54:23 +08002490 if (!ret) {
2491 ret = -EINPROGRESS;
2492 } else {
Herbert Xu479bcc72015-07-30 17:53:17 +08002493 aead_unmap(jrdev, edesc, req);
Herbert Xuf2147b82015-06-16 13:54:23 +08002494 kfree(edesc);
2495 }
2496
2497 return ret;
2498}
2499
2500static int gcm_decrypt(struct aead_request *req)
2501{
2502 struct aead_edesc *edesc;
2503 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2504 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2505 struct device *jrdev = ctx->jrdev;
2506 bool all_contig;
2507 u32 *desc;
2508 int ret = 0;
2509
2510 /* allocate extended descriptor */
2511 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2512 if (IS_ERR(edesc))
2513 return PTR_ERR(edesc);
2514
Yuan Kang1acebad2011-07-15 11:21:42 +08002515 /* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002516 init_gcm_job(req, edesc, all_contig, false);
Yuan Kang1acebad2011-07-15 11:21:42 +08002517#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002518 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002519 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2520 desc_bytes(edesc->hw_desc), 1);
2521#endif
2522
Yuan Kang0e479302011-07-15 11:21:41 +08002523 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002524 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2525 if (!ret) {
2526 ret = -EINPROGRESS;
2527 } else {
2528 aead_unmap(jrdev, edesc, req);
2529 kfree(edesc);
2530 }
Yuan Kang0e479302011-07-15 11:21:41 +08002531
Yuan Kang1acebad2011-07-15 11:21:42 +08002532 return ret;
2533}
Yuan Kang0e479302011-07-15 11:21:41 +08002534
Herbert Xu46218752015-07-09 07:17:33 +08002535static int ipsec_gcm_decrypt(struct aead_request *req)
2536{
2537 if (req->assoclen < 8)
2538 return -EINVAL;
2539
2540 return gcm_decrypt(req);
2541}
2542
Herbert Xu479bcc72015-07-30 17:53:17 +08002543static int aead_decrypt(struct aead_request *req)
Herbert Xuf2147b82015-06-16 13:54:23 +08002544{
2545 struct aead_edesc *edesc;
2546 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2547 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2548 struct device *jrdev = ctx->jrdev;
2549 bool all_contig;
2550 u32 *desc;
2551 int ret = 0;
2552
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002553#ifdef DEBUG
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002554 dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2555 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
Horia Geantă00fef2b2016-11-09 10:46:16 +02002556 req->assoclen + req->cryptlen, 1);
Catalin Vasile5ecf8ef2016-09-22 11:57:58 +03002557#endif
2558
Herbert Xuf2147b82015-06-16 13:54:23 +08002559 /* allocate extended descriptor */
Herbert Xu479bcc72015-07-30 17:53:17 +08002560 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2561 &all_contig, false);
Herbert Xuf2147b82015-06-16 13:54:23 +08002562 if (IS_ERR(edesc))
2563 return PTR_ERR(edesc);
2564
Herbert Xuf2147b82015-06-16 13:54:23 +08002565 /* Create and submit job descriptor */
Herbert Xu479bcc72015-07-30 17:53:17 +08002566 init_authenc_job(req, edesc, all_contig, false);
Herbert Xuf2147b82015-06-16 13:54:23 +08002567#ifdef DEBUG
2568 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2569 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2570 desc_bytes(edesc->hw_desc), 1);
2571#endif
2572
2573 desc = edesc->hw_desc;
Herbert Xu479bcc72015-07-30 17:53:17 +08002574 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
Herbert Xuf2147b82015-06-16 13:54:23 +08002575 if (!ret) {
2576 ret = -EINPROGRESS;
2577 } else {
Herbert Xu479bcc72015-07-30 17:53:17 +08002578 aead_unmap(jrdev, edesc, req);
Herbert Xuf2147b82015-06-16 13:54:23 +08002579 kfree(edesc);
2580 }
2581
2582 return ret;
2583}
2584
Yuan Kangacdca312011-07-15 11:21:42 +08002585/*
2586 * allocate and map the ablkcipher extended descriptor for ablkcipher
2587 */
2588static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2589 *req, int desc_bytes,
2590 bool *iv_contig_out)
2591{
2592 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2593 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2594 struct device *jrdev = ctx->jrdev;
2595 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2596 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2597 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05002598 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002599 struct ablkcipher_edesc *edesc;
2600 dma_addr_t iv_dma = 0;
2601 bool iv_contig = false;
2602 int sgc;
2603 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kanga299c832012-06-22 19:48:46 -05002604 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08002605
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002606 src_nents = sg_count(req->src, req->nbytes);
Yuan Kangacdca312011-07-15 11:21:42 +08002607
Yuan Kang643b39b2012-06-22 19:48:49 -05002608 if (req->dst != req->src)
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002609 dst_nents = sg_count(req->dst, req->nbytes);
Yuan Kangacdca312011-07-15 11:21:42 +08002610
2611 if (likely(req->src == req->dst)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002612 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2613 DMA_BIDIRECTIONAL);
Yuan Kangacdca312011-07-15 11:21:42 +08002614 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002615 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2616 DMA_TO_DEVICE);
2617 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2618 DMA_FROM_DEVICE);
Yuan Kangacdca312011-07-15 11:21:42 +08002619 }
2620
Horia Geantace572082014-07-11 15:34:49 +03002621 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2622 if (dma_mapping_error(jrdev, iv_dma)) {
2623 dev_err(jrdev, "unable to map IV\n");
2624 return ERR_PTR(-ENOMEM);
2625 }
2626
Yuan Kangacdca312011-07-15 11:21:42 +08002627 /*
2628 * Check if the IV is contiguous with the source data. If so, pass
2629 * the data flat; if not, build a link table with the IV as the first entry.
2630 */
Yuan Kangacdca312011-07-15 11:21:42 +08002631 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2632 iv_contig = true;
2633 else
2634 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002635 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2636 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002637
2638 /* allocate space for base edesc and hw desc commands, link tables */
Victoria Milhoandde20ae2015-08-05 11:28:39 -07002639 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2640 GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002641 if (!edesc) {
2642 dev_err(jrdev, "could not allocate extended descriptor\n");
2643 dma_unmap_single(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
2644 return ERR_PTR(-ENOMEM);
2645 }
2645
2646 edesc->src_nents = src_nents;
2647 edesc->dst_nents = dst_nents;
Yuan Kanga299c832012-06-22 19:48:46 -05002648 edesc->sec4_sg_bytes = sec4_sg_bytes;
2649 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2650 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002651
Yuan Kanga299c832012-06-22 19:48:46 -05002652 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002653 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002654 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2655 sg_to_sec4_sg_last(req->src, src_nents,
2656 edesc->sec4_sg + 1, 0);
2657 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002658 }
2659
Yuan Kang643b39b2012-06-22 19:48:49 -05002660 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002661 sg_to_sec4_sg_last(req->dst, dst_nents,
2662 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08002663 }
2664
Yuan Kanga299c832012-06-22 19:48:46 -05002665 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2666 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002667 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2668 dev_err(jrdev, "unable to map S/G table\n");
2669 kfree(edesc);
2670 return ERR_PTR(-ENOMEM);
2671 }
2671
Yuan Kangacdca312011-07-15 11:21:42 +08002672 edesc->iv_dma = iv_dma;
2673
2674#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002675 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05002676 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2677 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002678#endif
2679
2680 *iv_contig_out = iv_contig;
2681 return edesc;
2682}
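/*
 * Illustrative note: when the IV is not contiguous with the source, the
 * link table built above is laid out as
 *	[ IV entry | src entries ... | dst entries ... ]
 * so the input S/G pointer spans 1 + src_nents entries and the output
 * pointer starts at sec4_sg_index. Hypothetical entry count:
 */
static inline int example_ablkcipher_sg_len(bool iv_contig, int src_nents,
					    int dst_nents)
{
	return (iv_contig ? 0 : 1) + src_nents + dst_nents;
}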
2683
2684static int ablkcipher_encrypt(struct ablkcipher_request *req)
2685{
2686 struct ablkcipher_edesc *edesc;
2687 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2688 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2689 struct device *jrdev = ctx->jrdev;
2690 bool iv_contig;
2691 u32 *desc;
2692 int ret = 0;
2693
2694 /* allocate extended descriptor */
2695 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2696 CAAM_CMD_SZ, &iv_contig);
2697 if (IS_ERR(edesc))
2698 return PTR_ERR(edesc);
2699
2700 /* Create and submit job descriptor */
2701 init_ablkcipher_job(ctx->sh_desc_enc,
2702 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2703#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002704 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002705 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2706 desc_bytes(edesc->hw_desc), 1);
2707#endif
2708 desc = edesc->hw_desc;
2709 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2710
2711 if (!ret) {
2712 ret = -EINPROGRESS;
2713 } else {
2714 ablkcipher_unmap(jrdev, edesc, req);
2715 kfree(edesc);
2716 }
2717
2718 return ret;
2719}
2720
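/*
 * ablkcipher_decrypt - same flow as ablkcipher_encrypt(), but built
 * around the shared decrypt descriptor and completed through
 * ablkcipher_decrypt_done().
 */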
2721static int ablkcipher_decrypt(struct ablkcipher_request *req)
2722{
2723 struct ablkcipher_edesc *edesc;
2724 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2725 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2726 struct device *jrdev = ctx->jrdev;
2727 bool iv_contig;
2728 u32 *desc;
2729 int ret = 0;
2730
2731 /* allocate extended descriptor */
2732 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2733 CAAM_CMD_SZ, &iv_contig);
2734 if (IS_ERR(edesc))
2735 return PTR_ERR(edesc);
2736
 2737 /* Create and submit job descriptor */
2738 init_ablkcipher_job(ctx->sh_desc_dec,
2739 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2740 desc = edesc->hw_desc;
2741#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002742 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002743 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2744 desc_bytes(edesc->hw_desc), 1);
2745#endif
2746
2747 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2748 if (!ret) {
2749 ret = -EINPROGRESS;
2750 } else {
2751 ablkcipher_unmap(jrdev, edesc, req);
2752 kfree(edesc);
2753 }
2754
2755 return ret;
2756}
2757
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002758/*
2759 * allocate and map the ablkcipher extended descriptor
2760 * for ablkcipher givencrypt
2761 */
2762static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2763 struct skcipher_givcrypt_request *greq,
2764 int desc_bytes,
2765 bool *iv_contig_out)
2766{
2767 struct ablkcipher_request *req = &greq->creq;
2768 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2769 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2770 struct device *jrdev = ctx->jrdev;
2771 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2772 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2773 GFP_KERNEL : GFP_ATOMIC;
2774 int src_nents, dst_nents = 0, sec4_sg_bytes;
2775 struct ablkcipher_edesc *edesc;
2776 dma_addr_t iv_dma = 0;
2777 bool iv_contig = false;
2778 int sgc;
2779 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002780 int sec4_sg_index;
2781
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002782 src_nents = sg_count(req->src, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002783
2784 if (unlikely(req->dst != req->src))
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002785 dst_nents = sg_count(req->dst, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002786
2787 if (likely(req->src == req->dst)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002788 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2789 DMA_BIDIRECTIONAL);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002790 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002791 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2792 DMA_TO_DEVICE);
2793 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2794 DMA_FROM_DEVICE);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002795 }
2796
2797 /*
 2798 * Check if the generated IV can be contiguous with the destination.
 2799 * If so, use it directly; if not, build a S/G table for IV and dst.
2800 */
2801 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2802 if (dma_mapping_error(jrdev, iv_dma)) {
2803 dev_err(jrdev, "unable to map IV\n");
2804 return ERR_PTR(-ENOMEM);
2805 }
2806
2807 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2808 iv_contig = true;
2809 else
2810 dst_nents = dst_nents ? : 1;
2811 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2812 sizeof(struct sec4_sg_entry);
2813
2814 /* allocate space for base edesc and hw desc commands, link tables */
Victoria Milhoandde20ae2015-08-05 11:28:39 -07002815 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2816 GFP_DMA | flags);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002817 if (!edesc) {
2818 dev_err(jrdev, "could not allocate extended descriptor\n");
2819 return ERR_PTR(-ENOMEM);
2820 }
2821
2822 edesc->src_nents = src_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002823 edesc->dst_nents = dst_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002824 edesc->sec4_sg_bytes = sec4_sg_bytes;
2825 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2826 desc_bytes;
2827
2828 sec4_sg_index = 0;
2829 if (src_nents) {
2830 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2831 sec4_sg_index += src_nents;
2832 }
2833
2834 if (!iv_contig) {
2835 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2836 iv_dma, ivsize, 0);
2837 sec4_sg_index += 1;
2838 sg_to_sec4_sg_last(req->dst, dst_nents,
2839 edesc->sec4_sg + sec4_sg_index, 0);
2840 }
2841
2842 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2843 sec4_sg_bytes, DMA_TO_DEVICE);
2844 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2845 dev_err(jrdev, "unable to map S/G table\n");
 kfree(edesc); /* free the just-allocated edesc so this error path does not leak it */
 2846 return ERR_PTR(-ENOMEM);
2847 }
2848 edesc->iv_dma = iv_dma;
2849
2850#ifdef DEBUG
2851 print_hex_dump(KERN_ERR,
2852 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2853 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2854 sec4_sg_bytes, 1);
2855#endif
2856
2857 *iv_contig_out = iv_contig;
2858 return edesc;
2859}
2860
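/*
 * ablkcipher_givencrypt - encrypt using the shared givencrypt
 * descriptor, which also generates the IV (returned in greq->giv);
 * submission and completion otherwise mirror ablkcipher_encrypt().
 */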
2861static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2862{
2863 struct ablkcipher_request *req = &creq->creq;
2864 struct ablkcipher_edesc *edesc;
2865 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2866 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2867 struct device *jrdev = ctx->jrdev;
2868 bool iv_contig;
2869 u32 *desc;
2870 int ret = 0;
2871
2872 /* allocate extended descriptor */
2873 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2874 CAAM_CMD_SZ, &iv_contig);
2875 if (IS_ERR(edesc))
2876 return PTR_ERR(edesc);
2877
 2878 /* Create and submit job descriptor */
2879 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2880 edesc, req, iv_contig);
2881#ifdef DEBUG
2882 print_hex_dump(KERN_ERR,
2883 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2884 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2885 desc_bytes(edesc->hw_desc), 1);
2886#endif
2887 desc = edesc->hw_desc;
2888 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2889
2890 if (!ret) {
2891 ret = -EINPROGRESS;
2892 } else {
2893 ablkcipher_unmap(jrdev, edesc, req);
2894 kfree(edesc);
2895 }
2896
2897 return ret;
2898}
2899
Yuan Kang885e9e22011-07-15 11:21:41 +08002900#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002901#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08002902struct caam_alg_template {
2903 char name[CRYPTO_MAX_ALG_NAME];
2904 char driver_name[CRYPTO_MAX_ALG_NAME];
2905 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002906 u32 type;
2907 union {
2908 struct ablkcipher_alg ablkcipher;
Yuan Kang885e9e22011-07-15 11:21:41 +08002909 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002910 u32 class1_alg_type;
2911 u32 class2_alg_type;
2912 u32 alg_op;
2913};
2914
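/*
 * Table of (giv)ablkcipher algorithm templates: each entry ties a
 * crypto API name/driver_name pair to its ablkcipher ops and to the
 * CAAM class 1 descriptor header value (class1_alg_type) used when
 * the shared descriptors are built.
 */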
2915static struct caam_alg_template driver_algs[] = {
Yuan Kangacdca312011-07-15 11:21:42 +08002916 /* ablkcipher descriptor */
2917 {
2918 .name = "cbc(aes)",
2919 .driver_name = "cbc-aes-caam",
2920 .blocksize = AES_BLOCK_SIZE,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002921 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002922 .template_ablkcipher = {
2923 .setkey = ablkcipher_setkey,
2924 .encrypt = ablkcipher_encrypt,
2925 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002926 .givencrypt = ablkcipher_givencrypt,
2927 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002928 .min_keysize = AES_MIN_KEY_SIZE,
2929 .max_keysize = AES_MAX_KEY_SIZE,
2930 .ivsize = AES_BLOCK_SIZE,
2931 },
2932 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2933 },
2934 {
2935 .name = "cbc(des3_ede)",
2936 .driver_name = "cbc-3des-caam",
2937 .blocksize = DES3_EDE_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002938 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002939 .template_ablkcipher = {
2940 .setkey = ablkcipher_setkey,
2941 .encrypt = ablkcipher_encrypt,
2942 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002943 .givencrypt = ablkcipher_givencrypt,
2944 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002945 .min_keysize = DES3_EDE_KEY_SIZE,
2946 .max_keysize = DES3_EDE_KEY_SIZE,
2947 .ivsize = DES3_EDE_BLOCK_SIZE,
2948 },
2949 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2950 },
2951 {
2952 .name = "cbc(des)",
2953 .driver_name = "cbc-des-caam",
2954 .blocksize = DES_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002955 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002956 .template_ablkcipher = {
2957 .setkey = ablkcipher_setkey,
2958 .encrypt = ablkcipher_encrypt,
2959 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002960 .givencrypt = ablkcipher_givencrypt,
2961 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002962 .min_keysize = DES_KEY_SIZE,
2963 .max_keysize = DES_KEY_SIZE,
2964 .ivsize = DES_BLOCK_SIZE,
2965 },
2966 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02002967 },
2968 {
2969 .name = "ctr(aes)",
2970 .driver_name = "ctr-aes-caam",
2971 .blocksize = 1,
2972 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2973 .template_ablkcipher = {
2974 .setkey = ablkcipher_setkey,
2975 .encrypt = ablkcipher_encrypt,
2976 .decrypt = ablkcipher_decrypt,
2977 .geniv = "chainiv",
2978 .min_keysize = AES_MIN_KEY_SIZE,
2979 .max_keysize = AES_MAX_KEY_SIZE,
2980 .ivsize = AES_BLOCK_SIZE,
2981 },
2982 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002983 },
2984 {
2985 .name = "rfc3686(ctr(aes))",
2986 .driver_name = "rfc3686-ctr-aes-caam",
2987 .blocksize = 1,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002988 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002989 .template_ablkcipher = {
2990 .setkey = ablkcipher_setkey,
2991 .encrypt = ablkcipher_encrypt,
2992 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002993 .givencrypt = ablkcipher_givencrypt,
2994 .geniv = "<built-in>",
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002995 .min_keysize = AES_MIN_KEY_SIZE +
2996 CTR_RFC3686_NONCE_SIZE,
2997 .max_keysize = AES_MAX_KEY_SIZE +
2998 CTR_RFC3686_NONCE_SIZE,
2999 .ivsize = CTR_RFC3686_IV_SIZE,
3000 },
3001 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilec6415a62015-10-02 13:13:18 +03003002 },
3003 {
3004 .name = "xts(aes)",
3005 .driver_name = "xts-aes-caam",
3006 .blocksize = AES_BLOCK_SIZE,
3007 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3008 .template_ablkcipher = {
3009 .setkey = xts_ablkcipher_setkey,
3010 .encrypt = ablkcipher_encrypt,
3011 .decrypt = ablkcipher_decrypt,
3012 .geniv = "eseqiv",
3013 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3014 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3015 .ivsize = AES_BLOCK_SIZE,
3016 },
3017 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
3018 },
Kim Phillips8e8ec592011-03-13 16:54:26 +08003019};
3020
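/*
 * AEAD algorithms, registered through the aead_alg interface. The
 * .caam member carries the class 1/class 2 descriptor header values
 * plus the rfc3686 and geniv flags that steer shared descriptor
 * construction.
 */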
Herbert Xuf2147b82015-06-16 13:54:23 +08003021static struct caam_aead_alg driver_aeads[] = {
3022 {
3023 .aead = {
3024 .base = {
3025 .cra_name = "rfc4106(gcm(aes))",
3026 .cra_driver_name = "rfc4106-gcm-aes-caam",
3027 .cra_blocksize = 1,
3028 },
3029 .setkey = rfc4106_setkey,
3030 .setauthsize = rfc4106_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08003031 .encrypt = ipsec_gcm_encrypt,
3032 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003033 .ivsize = 8,
3034 .maxauthsize = AES_BLOCK_SIZE,
3035 },
3036 .caam = {
3037 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3038 },
3039 },
3040 {
3041 .aead = {
3042 .base = {
3043 .cra_name = "rfc4543(gcm(aes))",
3044 .cra_driver_name = "rfc4543-gcm-aes-caam",
3045 .cra_blocksize = 1,
3046 },
3047 .setkey = rfc4543_setkey,
3048 .setauthsize = rfc4543_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08003049 .encrypt = ipsec_gcm_encrypt,
3050 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003051 .ivsize = 8,
3052 .maxauthsize = AES_BLOCK_SIZE,
3053 },
3054 .caam = {
3055 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3056 },
3057 },
3058 /* Galois Counter Mode */
3059 {
3060 .aead = {
3061 .base = {
3062 .cra_name = "gcm(aes)",
3063 .cra_driver_name = "gcm-aes-caam",
3064 .cra_blocksize = 1,
3065 },
3066 .setkey = gcm_setkey,
3067 .setauthsize = gcm_setauthsize,
3068 .encrypt = gcm_encrypt,
3069 .decrypt = gcm_decrypt,
3070 .ivsize = 12,
3071 .maxauthsize = AES_BLOCK_SIZE,
3072 },
3073 .caam = {
3074 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3075 },
3076 },
Herbert Xu479bcc72015-07-30 17:53:17 +08003077 /* single-pass ipsec_esp descriptor */
3078 {
3079 .aead = {
3080 .base = {
3081 .cra_name = "authenc(hmac(md5),"
3082 "ecb(cipher_null))",
3083 .cra_driver_name = "authenc-hmac-md5-"
3084 "ecb-cipher_null-caam",
3085 .cra_blocksize = NULL_BLOCK_SIZE,
3086 },
3087 .setkey = aead_setkey,
3088 .setauthsize = aead_setauthsize,
3089 .encrypt = aead_encrypt,
3090 .decrypt = aead_decrypt,
3091 .ivsize = NULL_IV_SIZE,
3092 .maxauthsize = MD5_DIGEST_SIZE,
3093 },
3094 .caam = {
3095 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3096 OP_ALG_AAI_HMAC_PRECOMP,
3097 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3098 },
3099 },
3100 {
3101 .aead = {
3102 .base = {
3103 .cra_name = "authenc(hmac(sha1),"
3104 "ecb(cipher_null))",
3105 .cra_driver_name = "authenc-hmac-sha1-"
3106 "ecb-cipher_null-caam",
3107 .cra_blocksize = NULL_BLOCK_SIZE,
3108 },
3109 .setkey = aead_setkey,
3110 .setauthsize = aead_setauthsize,
3111 .encrypt = aead_encrypt,
3112 .decrypt = aead_decrypt,
3113 .ivsize = NULL_IV_SIZE,
3114 .maxauthsize = SHA1_DIGEST_SIZE,
3115 },
3116 .caam = {
3117 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3118 OP_ALG_AAI_HMAC_PRECOMP,
3119 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3120 },
3121 },
3122 {
3123 .aead = {
3124 .base = {
3125 .cra_name = "authenc(hmac(sha224),"
3126 "ecb(cipher_null))",
3127 .cra_driver_name = "authenc-hmac-sha224-"
3128 "ecb-cipher_null-caam",
3129 .cra_blocksize = NULL_BLOCK_SIZE,
3130 },
3131 .setkey = aead_setkey,
3132 .setauthsize = aead_setauthsize,
3133 .encrypt = aead_encrypt,
3134 .decrypt = aead_decrypt,
3135 .ivsize = NULL_IV_SIZE,
3136 .maxauthsize = SHA224_DIGEST_SIZE,
3137 },
3138 .caam = {
3139 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3140 OP_ALG_AAI_HMAC_PRECOMP,
3141 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3142 },
3143 },
3144 {
3145 .aead = {
3146 .base = {
3147 .cra_name = "authenc(hmac(sha256),"
3148 "ecb(cipher_null))",
3149 .cra_driver_name = "authenc-hmac-sha256-"
3150 "ecb-cipher_null-caam",
3151 .cra_blocksize = NULL_BLOCK_SIZE,
3152 },
3153 .setkey = aead_setkey,
3154 .setauthsize = aead_setauthsize,
3155 .encrypt = aead_encrypt,
3156 .decrypt = aead_decrypt,
3157 .ivsize = NULL_IV_SIZE,
3158 .maxauthsize = SHA256_DIGEST_SIZE,
3159 },
3160 .caam = {
3161 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3162 OP_ALG_AAI_HMAC_PRECOMP,
3163 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3164 },
3165 },
3166 {
3167 .aead = {
3168 .base = {
3169 .cra_name = "authenc(hmac(sha384),"
3170 "ecb(cipher_null))",
3171 .cra_driver_name = "authenc-hmac-sha384-"
3172 "ecb-cipher_null-caam",
3173 .cra_blocksize = NULL_BLOCK_SIZE,
3174 },
3175 .setkey = aead_setkey,
3176 .setauthsize = aead_setauthsize,
3177 .encrypt = aead_encrypt,
3178 .decrypt = aead_decrypt,
3179 .ivsize = NULL_IV_SIZE,
3180 .maxauthsize = SHA384_DIGEST_SIZE,
3181 },
3182 .caam = {
3183 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3184 OP_ALG_AAI_HMAC_PRECOMP,
3185 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3186 },
3187 },
3188 {
3189 .aead = {
3190 .base = {
3191 .cra_name = "authenc(hmac(sha512),"
3192 "ecb(cipher_null))",
3193 .cra_driver_name = "authenc-hmac-sha512-"
3194 "ecb-cipher_null-caam",
3195 .cra_blocksize = NULL_BLOCK_SIZE,
3196 },
3197 .setkey = aead_setkey,
3198 .setauthsize = aead_setauthsize,
3199 .encrypt = aead_encrypt,
3200 .decrypt = aead_decrypt,
3201 .ivsize = NULL_IV_SIZE,
3202 .maxauthsize = SHA512_DIGEST_SIZE,
3203 },
3204 .caam = {
3205 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3206 OP_ALG_AAI_HMAC_PRECOMP,
3207 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3208 },
3209 },
3210 {
3211 .aead = {
3212 .base = {
3213 .cra_name = "authenc(hmac(md5),cbc(aes))",
3214 .cra_driver_name = "authenc-hmac-md5-"
3215 "cbc-aes-caam",
3216 .cra_blocksize = AES_BLOCK_SIZE,
3217 },
3218 .setkey = aead_setkey,
3219 .setauthsize = aead_setauthsize,
3220 .encrypt = aead_encrypt,
3221 .decrypt = aead_decrypt,
3222 .ivsize = AES_BLOCK_SIZE,
3223 .maxauthsize = MD5_DIGEST_SIZE,
3224 },
3225 .caam = {
3226 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3227 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3228 OP_ALG_AAI_HMAC_PRECOMP,
3229 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3230 },
3231 },
3232 {
3233 .aead = {
3234 .base = {
3235 .cra_name = "echainiv(authenc(hmac(md5),"
3236 "cbc(aes)))",
3237 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3238 "cbc-aes-caam",
3239 .cra_blocksize = AES_BLOCK_SIZE,
3240 },
3241 .setkey = aead_setkey,
3242 .setauthsize = aead_setauthsize,
3243 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003244 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003245 .ivsize = AES_BLOCK_SIZE,
3246 .maxauthsize = MD5_DIGEST_SIZE,
3247 },
3248 .caam = {
3249 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3250 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3251 OP_ALG_AAI_HMAC_PRECOMP,
3252 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3253 .geniv = true,
3254 },
3255 },
3256 {
3257 .aead = {
3258 .base = {
3259 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3260 .cra_driver_name = "authenc-hmac-sha1-"
3261 "cbc-aes-caam",
3262 .cra_blocksize = AES_BLOCK_SIZE,
3263 },
3264 .setkey = aead_setkey,
3265 .setauthsize = aead_setauthsize,
3266 .encrypt = aead_encrypt,
3267 .decrypt = aead_decrypt,
3268 .ivsize = AES_BLOCK_SIZE,
3269 .maxauthsize = SHA1_DIGEST_SIZE,
3270 },
3271 .caam = {
3272 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3273 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3274 OP_ALG_AAI_HMAC_PRECOMP,
3275 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3276 },
3277 },
3278 {
3279 .aead = {
3280 .base = {
3281 .cra_name = "echainiv(authenc(hmac(sha1),"
3282 "cbc(aes)))",
3283 .cra_driver_name = "echainiv-authenc-"
3284 "hmac-sha1-cbc-aes-caam",
3285 .cra_blocksize = AES_BLOCK_SIZE,
3286 },
3287 .setkey = aead_setkey,
3288 .setauthsize = aead_setauthsize,
3289 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003290 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003291 .ivsize = AES_BLOCK_SIZE,
3292 .maxauthsize = SHA1_DIGEST_SIZE,
3293 },
3294 .caam = {
3295 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3296 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3297 OP_ALG_AAI_HMAC_PRECOMP,
3298 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3299 .geniv = true,
3300 },
3301 },
3302 {
3303 .aead = {
3304 .base = {
3305 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3306 .cra_driver_name = "authenc-hmac-sha224-"
3307 "cbc-aes-caam",
3308 .cra_blocksize = AES_BLOCK_SIZE,
3309 },
3310 .setkey = aead_setkey,
3311 .setauthsize = aead_setauthsize,
3312 .encrypt = aead_encrypt,
3313 .decrypt = aead_decrypt,
3314 .ivsize = AES_BLOCK_SIZE,
3315 .maxauthsize = SHA224_DIGEST_SIZE,
3316 },
3317 .caam = {
3318 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3319 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3320 OP_ALG_AAI_HMAC_PRECOMP,
3321 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3322 },
3323 },
3324 {
3325 .aead = {
3326 .base = {
3327 .cra_name = "echainiv(authenc(hmac(sha224),"
3328 "cbc(aes)))",
3329 .cra_driver_name = "echainiv-authenc-"
3330 "hmac-sha224-cbc-aes-caam",
3331 .cra_blocksize = AES_BLOCK_SIZE,
3332 },
3333 .setkey = aead_setkey,
3334 .setauthsize = aead_setauthsize,
3335 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003336 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003337 .ivsize = AES_BLOCK_SIZE,
3338 .maxauthsize = SHA224_DIGEST_SIZE,
3339 },
3340 .caam = {
3341 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3342 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3343 OP_ALG_AAI_HMAC_PRECOMP,
3344 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3345 .geniv = true,
3346 },
3347 },
3348 {
3349 .aead = {
3350 .base = {
3351 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3352 .cra_driver_name = "authenc-hmac-sha256-"
3353 "cbc-aes-caam",
3354 .cra_blocksize = AES_BLOCK_SIZE,
3355 },
3356 .setkey = aead_setkey,
3357 .setauthsize = aead_setauthsize,
3358 .encrypt = aead_encrypt,
3359 .decrypt = aead_decrypt,
3360 .ivsize = AES_BLOCK_SIZE,
3361 .maxauthsize = SHA256_DIGEST_SIZE,
3362 },
3363 .caam = {
3364 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3365 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3366 OP_ALG_AAI_HMAC_PRECOMP,
3367 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3368 },
3369 },
3370 {
3371 .aead = {
3372 .base = {
3373 .cra_name = "echainiv(authenc(hmac(sha256),"
3374 "cbc(aes)))",
3375 .cra_driver_name = "echainiv-authenc-"
3376 "hmac-sha256-cbc-aes-caam",
3377 .cra_blocksize = AES_BLOCK_SIZE,
3378 },
3379 .setkey = aead_setkey,
3380 .setauthsize = aead_setauthsize,
3381 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003382 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003383 .ivsize = AES_BLOCK_SIZE,
3384 .maxauthsize = SHA256_DIGEST_SIZE,
3385 },
3386 .caam = {
3387 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3388 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3389 OP_ALG_AAI_HMAC_PRECOMP,
3390 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3391 .geniv = true,
3392 },
3393 },
3394 {
3395 .aead = {
3396 .base = {
3397 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3398 .cra_driver_name = "authenc-hmac-sha384-"
3399 "cbc-aes-caam",
3400 .cra_blocksize = AES_BLOCK_SIZE,
3401 },
3402 .setkey = aead_setkey,
3403 .setauthsize = aead_setauthsize,
3404 .encrypt = aead_encrypt,
3405 .decrypt = aead_decrypt,
3406 .ivsize = AES_BLOCK_SIZE,
3407 .maxauthsize = SHA384_DIGEST_SIZE,
3408 },
3409 .caam = {
3410 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3411 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3412 OP_ALG_AAI_HMAC_PRECOMP,
3413 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3414 },
3415 },
3416 {
3417 .aead = {
3418 .base = {
3419 .cra_name = "echainiv(authenc(hmac(sha384),"
3420 "cbc(aes)))",
3421 .cra_driver_name = "echainiv-authenc-"
3422 "hmac-sha384-cbc-aes-caam",
3423 .cra_blocksize = AES_BLOCK_SIZE,
3424 },
3425 .setkey = aead_setkey,
3426 .setauthsize = aead_setauthsize,
3427 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003428 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003429 .ivsize = AES_BLOCK_SIZE,
3430 .maxauthsize = SHA384_DIGEST_SIZE,
3431 },
3432 .caam = {
3433 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3434 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3435 OP_ALG_AAI_HMAC_PRECOMP,
3436 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3437 .geniv = true,
3438 },
3439 },
3440 {
3441 .aead = {
3442 .base = {
3443 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3444 .cra_driver_name = "authenc-hmac-sha512-"
3445 "cbc-aes-caam",
3446 .cra_blocksize = AES_BLOCK_SIZE,
3447 },
3448 .setkey = aead_setkey,
3449 .setauthsize = aead_setauthsize,
3450 .encrypt = aead_encrypt,
3451 .decrypt = aead_decrypt,
3452 .ivsize = AES_BLOCK_SIZE,
3453 .maxauthsize = SHA512_DIGEST_SIZE,
3454 },
3455 .caam = {
3456 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3457 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3458 OP_ALG_AAI_HMAC_PRECOMP,
3459 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3460 },
3461 },
3462 {
3463 .aead = {
3464 .base = {
3465 .cra_name = "echainiv(authenc(hmac(sha512),"
3466 "cbc(aes)))",
3467 .cra_driver_name = "echainiv-authenc-"
3468 "hmac-sha512-cbc-aes-caam",
3469 .cra_blocksize = AES_BLOCK_SIZE,
3470 },
3471 .setkey = aead_setkey,
3472 .setauthsize = aead_setauthsize,
3473 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003474 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003475 .ivsize = AES_BLOCK_SIZE,
3476 .maxauthsize = SHA512_DIGEST_SIZE,
3477 },
3478 .caam = {
3479 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3480 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3481 OP_ALG_AAI_HMAC_PRECOMP,
3482 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3483 .geniv = true,
3484 },
3485 },
3486 {
3487 .aead = {
3488 .base = {
3489 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3490 .cra_driver_name = "authenc-hmac-md5-"
3491 "cbc-des3_ede-caam",
3492 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3493 },
3494 .setkey = aead_setkey,
3495 .setauthsize = aead_setauthsize,
3496 .encrypt = aead_encrypt,
3497 .decrypt = aead_decrypt,
3498 .ivsize = DES3_EDE_BLOCK_SIZE,
3499 .maxauthsize = MD5_DIGEST_SIZE,
3500 },
3501 .caam = {
3502 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3503 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3504 OP_ALG_AAI_HMAC_PRECOMP,
3505 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3506 }
3507 },
3508 {
3509 .aead = {
3510 .base = {
3511 .cra_name = "echainiv(authenc(hmac(md5),"
3512 "cbc(des3_ede)))",
3513 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3514 "cbc-des3_ede-caam",
3515 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3516 },
3517 .setkey = aead_setkey,
3518 .setauthsize = aead_setauthsize,
3519 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003520 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003521 .ivsize = DES3_EDE_BLOCK_SIZE,
3522 .maxauthsize = MD5_DIGEST_SIZE,
3523 },
3524 .caam = {
3525 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3526 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3527 OP_ALG_AAI_HMAC_PRECOMP,
3528 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3529 .geniv = true,
3530 }
3531 },
3532 {
3533 .aead = {
3534 .base = {
3535 .cra_name = "authenc(hmac(sha1),"
3536 "cbc(des3_ede))",
3537 .cra_driver_name = "authenc-hmac-sha1-"
3538 "cbc-des3_ede-caam",
3539 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3540 },
3541 .setkey = aead_setkey,
3542 .setauthsize = aead_setauthsize,
3543 .encrypt = aead_encrypt,
3544 .decrypt = aead_decrypt,
3545 .ivsize = DES3_EDE_BLOCK_SIZE,
3546 .maxauthsize = SHA1_DIGEST_SIZE,
3547 },
3548 .caam = {
3549 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3550 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3551 OP_ALG_AAI_HMAC_PRECOMP,
3552 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3553 },
3554 },
3555 {
3556 .aead = {
3557 .base = {
3558 .cra_name = "echainiv(authenc(hmac(sha1),"
3559 "cbc(des3_ede)))",
3560 .cra_driver_name = "echainiv-authenc-"
3561 "hmac-sha1-"
3562 "cbc-des3_ede-caam",
3563 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3564 },
3565 .setkey = aead_setkey,
3566 .setauthsize = aead_setauthsize,
3567 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003568 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003569 .ivsize = DES3_EDE_BLOCK_SIZE,
3570 .maxauthsize = SHA1_DIGEST_SIZE,
3571 },
3572 .caam = {
3573 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3574 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3575 OP_ALG_AAI_HMAC_PRECOMP,
3576 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3577 .geniv = true,
3578 },
3579 },
3580 {
3581 .aead = {
3582 .base = {
3583 .cra_name = "authenc(hmac(sha224),"
3584 "cbc(des3_ede))",
3585 .cra_driver_name = "authenc-hmac-sha224-"
3586 "cbc-des3_ede-caam",
3587 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3588 },
3589 .setkey = aead_setkey,
3590 .setauthsize = aead_setauthsize,
3591 .encrypt = aead_encrypt,
3592 .decrypt = aead_decrypt,
3593 .ivsize = DES3_EDE_BLOCK_SIZE,
3594 .maxauthsize = SHA224_DIGEST_SIZE,
3595 },
3596 .caam = {
3597 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3598 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3599 OP_ALG_AAI_HMAC_PRECOMP,
3600 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3601 },
3602 },
3603 {
3604 .aead = {
3605 .base = {
3606 .cra_name = "echainiv(authenc(hmac(sha224),"
3607 "cbc(des3_ede)))",
3608 .cra_driver_name = "echainiv-authenc-"
3609 "hmac-sha224-"
3610 "cbc-des3_ede-caam",
3611 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3612 },
3613 .setkey = aead_setkey,
3614 .setauthsize = aead_setauthsize,
3615 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003616 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003617 .ivsize = DES3_EDE_BLOCK_SIZE,
3618 .maxauthsize = SHA224_DIGEST_SIZE,
3619 },
3620 .caam = {
3621 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3622 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3623 OP_ALG_AAI_HMAC_PRECOMP,
3624 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3625 .geniv = true,
3626 },
3627 },
3628 {
3629 .aead = {
3630 .base = {
3631 .cra_name = "authenc(hmac(sha256),"
3632 "cbc(des3_ede))",
3633 .cra_driver_name = "authenc-hmac-sha256-"
3634 "cbc-des3_ede-caam",
3635 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3636 },
3637 .setkey = aead_setkey,
3638 .setauthsize = aead_setauthsize,
3639 .encrypt = aead_encrypt,
3640 .decrypt = aead_decrypt,
3641 .ivsize = DES3_EDE_BLOCK_SIZE,
3642 .maxauthsize = SHA256_DIGEST_SIZE,
3643 },
3644 .caam = {
3645 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3646 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3647 OP_ALG_AAI_HMAC_PRECOMP,
3648 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3649 },
3650 },
3651 {
3652 .aead = {
3653 .base = {
3654 .cra_name = "echainiv(authenc(hmac(sha256),"
3655 "cbc(des3_ede)))",
3656 .cra_driver_name = "echainiv-authenc-"
3657 "hmac-sha256-"
3658 "cbc-des3_ede-caam",
3659 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3660 },
3661 .setkey = aead_setkey,
3662 .setauthsize = aead_setauthsize,
3663 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003664 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003665 .ivsize = DES3_EDE_BLOCK_SIZE,
3666 .maxauthsize = SHA256_DIGEST_SIZE,
3667 },
3668 .caam = {
3669 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3670 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3671 OP_ALG_AAI_HMAC_PRECOMP,
3672 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3673 .geniv = true,
3674 },
3675 },
3676 {
3677 .aead = {
3678 .base = {
3679 .cra_name = "authenc(hmac(sha384),"
3680 "cbc(des3_ede))",
3681 .cra_driver_name = "authenc-hmac-sha384-"
3682 "cbc-des3_ede-caam",
3683 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3684 },
3685 .setkey = aead_setkey,
3686 .setauthsize = aead_setauthsize,
3687 .encrypt = aead_encrypt,
3688 .decrypt = aead_decrypt,
3689 .ivsize = DES3_EDE_BLOCK_SIZE,
3690 .maxauthsize = SHA384_DIGEST_SIZE,
3691 },
3692 .caam = {
3693 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3694 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3695 OP_ALG_AAI_HMAC_PRECOMP,
3696 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3697 },
3698 },
3699 {
3700 .aead = {
3701 .base = {
3702 .cra_name = "echainiv(authenc(hmac(sha384),"
3703 "cbc(des3_ede)))",
3704 .cra_driver_name = "echainiv-authenc-"
3705 "hmac-sha384-"
3706 "cbc-des3_ede-caam",
3707 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3708 },
3709 .setkey = aead_setkey,
3710 .setauthsize = aead_setauthsize,
3711 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003712 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003713 .ivsize = DES3_EDE_BLOCK_SIZE,
3714 .maxauthsize = SHA384_DIGEST_SIZE,
3715 },
3716 .caam = {
3717 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3718 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3719 OP_ALG_AAI_HMAC_PRECOMP,
3720 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3721 .geniv = true,
3722 },
3723 },
3724 {
3725 .aead = {
3726 .base = {
3727 .cra_name = "authenc(hmac(sha512),"
3728 "cbc(des3_ede))",
3729 .cra_driver_name = "authenc-hmac-sha512-"
3730 "cbc-des3_ede-caam",
3731 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3732 },
3733 .setkey = aead_setkey,
3734 .setauthsize = aead_setauthsize,
3735 .encrypt = aead_encrypt,
3736 .decrypt = aead_decrypt,
3737 .ivsize = DES3_EDE_BLOCK_SIZE,
3738 .maxauthsize = SHA512_DIGEST_SIZE,
3739 },
3740 .caam = {
3741 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3742 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3743 OP_ALG_AAI_HMAC_PRECOMP,
3744 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3745 },
3746 },
3747 {
3748 .aead = {
3749 .base = {
3750 .cra_name = "echainiv(authenc(hmac(sha512),"
3751 "cbc(des3_ede)))",
3752 .cra_driver_name = "echainiv-authenc-"
3753 "hmac-sha512-"
3754 "cbc-des3_ede-caam",
3755 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3756 },
3757 .setkey = aead_setkey,
3758 .setauthsize = aead_setauthsize,
3759 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003760 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003761 .ivsize = DES3_EDE_BLOCK_SIZE,
3762 .maxauthsize = SHA512_DIGEST_SIZE,
3763 },
3764 .caam = {
3765 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3766 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3767 OP_ALG_AAI_HMAC_PRECOMP,
3768 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3769 .geniv = true,
3770 },
3771 },
3772 {
3773 .aead = {
3774 .base = {
3775 .cra_name = "authenc(hmac(md5),cbc(des))",
3776 .cra_driver_name = "authenc-hmac-md5-"
3777 "cbc-des-caam",
3778 .cra_blocksize = DES_BLOCK_SIZE,
3779 },
3780 .setkey = aead_setkey,
3781 .setauthsize = aead_setauthsize,
3782 .encrypt = aead_encrypt,
3783 .decrypt = aead_decrypt,
3784 .ivsize = DES_BLOCK_SIZE,
3785 .maxauthsize = MD5_DIGEST_SIZE,
3786 },
3787 .caam = {
3788 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3789 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3790 OP_ALG_AAI_HMAC_PRECOMP,
3791 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3792 },
3793 },
3794 {
3795 .aead = {
3796 .base = {
3797 .cra_name = "echainiv(authenc(hmac(md5),"
3798 "cbc(des)))",
3799 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3800 "cbc-des-caam",
3801 .cra_blocksize = DES_BLOCK_SIZE,
3802 },
3803 .setkey = aead_setkey,
3804 .setauthsize = aead_setauthsize,
3805 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003806 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003807 .ivsize = DES_BLOCK_SIZE,
3808 .maxauthsize = MD5_DIGEST_SIZE,
3809 },
3810 .caam = {
3811 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3812 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3813 OP_ALG_AAI_HMAC_PRECOMP,
3814 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3815 .geniv = true,
3816 },
3817 },
3818 {
3819 .aead = {
3820 .base = {
3821 .cra_name = "authenc(hmac(sha1),cbc(des))",
3822 .cra_driver_name = "authenc-hmac-sha1-"
3823 "cbc-des-caam",
3824 .cra_blocksize = DES_BLOCK_SIZE,
3825 },
3826 .setkey = aead_setkey,
3827 .setauthsize = aead_setauthsize,
3828 .encrypt = aead_encrypt,
3829 .decrypt = aead_decrypt,
3830 .ivsize = DES_BLOCK_SIZE,
3831 .maxauthsize = SHA1_DIGEST_SIZE,
3832 },
3833 .caam = {
3834 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3835 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3836 OP_ALG_AAI_HMAC_PRECOMP,
3837 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3838 },
3839 },
3840 {
3841 .aead = {
3842 .base = {
3843 .cra_name = "echainiv(authenc(hmac(sha1),"
3844 "cbc(des)))",
3845 .cra_driver_name = "echainiv-authenc-"
3846 "hmac-sha1-cbc-des-caam",
3847 .cra_blocksize = DES_BLOCK_SIZE,
3848 },
3849 .setkey = aead_setkey,
3850 .setauthsize = aead_setauthsize,
3851 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003852 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003853 .ivsize = DES_BLOCK_SIZE,
3854 .maxauthsize = SHA1_DIGEST_SIZE,
3855 },
3856 .caam = {
3857 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3858 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3859 OP_ALG_AAI_HMAC_PRECOMP,
3860 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3861 .geniv = true,
3862 },
3863 },
3864 {
3865 .aead = {
3866 .base = {
3867 .cra_name = "authenc(hmac(sha224),cbc(des))",
3868 .cra_driver_name = "authenc-hmac-sha224-"
3869 "cbc-des-caam",
3870 .cra_blocksize = DES_BLOCK_SIZE,
3871 },
3872 .setkey = aead_setkey,
3873 .setauthsize = aead_setauthsize,
3874 .encrypt = aead_encrypt,
3875 .decrypt = aead_decrypt,
3876 .ivsize = DES_BLOCK_SIZE,
3877 .maxauthsize = SHA224_DIGEST_SIZE,
3878 },
3879 .caam = {
3880 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3881 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3882 OP_ALG_AAI_HMAC_PRECOMP,
3883 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3884 },
3885 },
3886 {
3887 .aead = {
3888 .base = {
3889 .cra_name = "echainiv(authenc(hmac(sha224),"
3890 "cbc(des)))",
3891 .cra_driver_name = "echainiv-authenc-"
3892 "hmac-sha224-cbc-des-caam",
3893 .cra_blocksize = DES_BLOCK_SIZE,
3894 },
3895 .setkey = aead_setkey,
3896 .setauthsize = aead_setauthsize,
3897 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003898 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003899 .ivsize = DES_BLOCK_SIZE,
3900 .maxauthsize = SHA224_DIGEST_SIZE,
3901 },
3902 .caam = {
3903 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3904 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3905 OP_ALG_AAI_HMAC_PRECOMP,
3906 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3907 .geniv = true,
3908 },
3909 },
3910 {
3911 .aead = {
3912 .base = {
3913 .cra_name = "authenc(hmac(sha256),cbc(des))",
3914 .cra_driver_name = "authenc-hmac-sha256-"
3915 "cbc-des-caam",
3916 .cra_blocksize = DES_BLOCK_SIZE,
3917 },
3918 .setkey = aead_setkey,
3919 .setauthsize = aead_setauthsize,
3920 .encrypt = aead_encrypt,
3921 .decrypt = aead_decrypt,
3922 .ivsize = DES_BLOCK_SIZE,
3923 .maxauthsize = SHA256_DIGEST_SIZE,
3924 },
3925 .caam = {
3926 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3927 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3928 OP_ALG_AAI_HMAC_PRECOMP,
3929 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3930 },
3931 },
3932 {
3933 .aead = {
3934 .base = {
3935 .cra_name = "echainiv(authenc(hmac(sha256),"
3936 "cbc(des)))",
3937 .cra_driver_name = "echainiv-authenc-"
3938 "hmac-sha256-cbc-des-caam",
3939 .cra_blocksize = DES_BLOCK_SIZE,
3940 },
3941 .setkey = aead_setkey,
3942 .setauthsize = aead_setauthsize,
3943 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003944 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003945 .ivsize = DES_BLOCK_SIZE,
3946 .maxauthsize = SHA256_DIGEST_SIZE,
3947 },
3948 .caam = {
3949 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3950 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3951 OP_ALG_AAI_HMAC_PRECOMP,
3952 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3953 .geniv = true,
3954 },
3955 },
3956 {
3957 .aead = {
3958 .base = {
3959 .cra_name = "authenc(hmac(sha384),cbc(des))",
3960 .cra_driver_name = "authenc-hmac-sha384-"
3961 "cbc-des-caam",
3962 .cra_blocksize = DES_BLOCK_SIZE,
3963 },
3964 .setkey = aead_setkey,
3965 .setauthsize = aead_setauthsize,
3966 .encrypt = aead_encrypt,
3967 .decrypt = aead_decrypt,
3968 .ivsize = DES_BLOCK_SIZE,
3969 .maxauthsize = SHA384_DIGEST_SIZE,
3970 },
3971 .caam = {
3972 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3973 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3974 OP_ALG_AAI_HMAC_PRECOMP,
3975 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3976 },
3977 },
3978 {
3979 .aead = {
3980 .base = {
3981 .cra_name = "echainiv(authenc(hmac(sha384),"
3982 "cbc(des)))",
3983 .cra_driver_name = "echainiv-authenc-"
3984 "hmac-sha384-cbc-des-caam",
3985 .cra_blocksize = DES_BLOCK_SIZE,
3986 },
3987 .setkey = aead_setkey,
3988 .setauthsize = aead_setauthsize,
3989 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003990 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003991 .ivsize = DES_BLOCK_SIZE,
3992 .maxauthsize = SHA384_DIGEST_SIZE,
3993 },
3994 .caam = {
3995 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3996 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3997 OP_ALG_AAI_HMAC_PRECOMP,
3998 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3999 .geniv = true,
4000 },
4001 },
4002 {
4003 .aead = {
4004 .base = {
4005 .cra_name = "authenc(hmac(sha512),cbc(des))",
4006 .cra_driver_name = "authenc-hmac-sha512-"
4007 "cbc-des-caam",
4008 .cra_blocksize = DES_BLOCK_SIZE,
4009 },
4010 .setkey = aead_setkey,
4011 .setauthsize = aead_setauthsize,
4012 .encrypt = aead_encrypt,
4013 .decrypt = aead_decrypt,
4014 .ivsize = DES_BLOCK_SIZE,
4015 .maxauthsize = SHA512_DIGEST_SIZE,
4016 },
4017 .caam = {
4018 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4019 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4020 OP_ALG_AAI_HMAC_PRECOMP,
4021 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4022 },
4023 },
4024 {
4025 .aead = {
4026 .base = {
4027 .cra_name = "echainiv(authenc(hmac(sha512),"
4028 "cbc(des)))",
4029 .cra_driver_name = "echainiv-authenc-"
4030 "hmac-sha512-cbc-des-caam",
4031 .cra_blocksize = DES_BLOCK_SIZE,
4032 },
4033 .setkey = aead_setkey,
4034 .setauthsize = aead_setauthsize,
4035 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004036 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004037 .ivsize = DES_BLOCK_SIZE,
4038 .maxauthsize = SHA512_DIGEST_SIZE,
4039 },
4040 .caam = {
4041 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4042 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4043 OP_ALG_AAI_HMAC_PRECOMP,
4044 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4045 .geniv = true,
4046 },
4047 },
4048 {
4049 .aead = {
4050 .base = {
4051 .cra_name = "authenc(hmac(md5),"
4052 "rfc3686(ctr(aes)))",
4053 .cra_driver_name = "authenc-hmac-md5-"
4054 "rfc3686-ctr-aes-caam",
4055 .cra_blocksize = 1,
4056 },
4057 .setkey = aead_setkey,
4058 .setauthsize = aead_setauthsize,
4059 .encrypt = aead_encrypt,
4060 .decrypt = aead_decrypt,
4061 .ivsize = CTR_RFC3686_IV_SIZE,
4062 .maxauthsize = MD5_DIGEST_SIZE,
4063 },
4064 .caam = {
4065 .class1_alg_type = OP_ALG_ALGSEL_AES |
4066 OP_ALG_AAI_CTR_MOD128,
4067 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4068 OP_ALG_AAI_HMAC_PRECOMP,
4069 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4070 .rfc3686 = true,
4071 },
4072 },
4073 {
4074 .aead = {
4075 .base = {
4076 .cra_name = "seqiv(authenc("
4077 "hmac(md5),rfc3686(ctr(aes))))",
4078 .cra_driver_name = "seqiv-authenc-hmac-md5-"
4079 "rfc3686-ctr-aes-caam",
4080 .cra_blocksize = 1,
4081 },
4082 .setkey = aead_setkey,
4083 .setauthsize = aead_setauthsize,
4084 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004085 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004086 .ivsize = CTR_RFC3686_IV_SIZE,
4087 .maxauthsize = MD5_DIGEST_SIZE,
4088 },
4089 .caam = {
4090 .class1_alg_type = OP_ALG_ALGSEL_AES |
4091 OP_ALG_AAI_CTR_MOD128,
4092 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4093 OP_ALG_AAI_HMAC_PRECOMP,
4094 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4095 .rfc3686 = true,
4096 .geniv = true,
4097 },
4098 },
4099 {
4100 .aead = {
4101 .base = {
4102 .cra_name = "authenc(hmac(sha1),"
4103 "rfc3686(ctr(aes)))",
4104 .cra_driver_name = "authenc-hmac-sha1-"
4105 "rfc3686-ctr-aes-caam",
4106 .cra_blocksize = 1,
4107 },
4108 .setkey = aead_setkey,
4109 .setauthsize = aead_setauthsize,
4110 .encrypt = aead_encrypt,
4111 .decrypt = aead_decrypt,
4112 .ivsize = CTR_RFC3686_IV_SIZE,
4113 .maxauthsize = SHA1_DIGEST_SIZE,
4114 },
4115 .caam = {
4116 .class1_alg_type = OP_ALG_ALGSEL_AES |
4117 OP_ALG_AAI_CTR_MOD128,
4118 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4119 OP_ALG_AAI_HMAC_PRECOMP,
4120 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4121 .rfc3686 = true,
4122 },
4123 },
4124 {
4125 .aead = {
4126 .base = {
4127 .cra_name = "seqiv(authenc("
4128 "hmac(sha1),rfc3686(ctr(aes))))",
4129 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
4130 "rfc3686-ctr-aes-caam",
4131 .cra_blocksize = 1,
4132 },
4133 .setkey = aead_setkey,
4134 .setauthsize = aead_setauthsize,
4135 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004136 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004137 .ivsize = CTR_RFC3686_IV_SIZE,
4138 .maxauthsize = SHA1_DIGEST_SIZE,
4139 },
4140 .caam = {
4141 .class1_alg_type = OP_ALG_ALGSEL_AES |
4142 OP_ALG_AAI_CTR_MOD128,
4143 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4144 OP_ALG_AAI_HMAC_PRECOMP,
4145 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4146 .rfc3686 = true,
4147 .geniv = true,
4148 },
4149 },
4150 {
4151 .aead = {
4152 .base = {
4153 .cra_name = "authenc(hmac(sha224),"
4154 "rfc3686(ctr(aes)))",
4155 .cra_driver_name = "authenc-hmac-sha224-"
4156 "rfc3686-ctr-aes-caam",
4157 .cra_blocksize = 1,
4158 },
4159 .setkey = aead_setkey,
4160 .setauthsize = aead_setauthsize,
4161 .encrypt = aead_encrypt,
4162 .decrypt = aead_decrypt,
4163 .ivsize = CTR_RFC3686_IV_SIZE,
4164 .maxauthsize = SHA224_DIGEST_SIZE,
4165 },
4166 .caam = {
4167 .class1_alg_type = OP_ALG_ALGSEL_AES |
4168 OP_ALG_AAI_CTR_MOD128,
4169 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4170 OP_ALG_AAI_HMAC_PRECOMP,
4171 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4172 .rfc3686 = true,
4173 },
4174 },
4175 {
4176 .aead = {
4177 .base = {
4178 .cra_name = "seqiv(authenc("
4179 "hmac(sha224),rfc3686(ctr(aes))))",
4180 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
4181 "rfc3686-ctr-aes-caam",
4182 .cra_blocksize = 1,
4183 },
4184 .setkey = aead_setkey,
4185 .setauthsize = aead_setauthsize,
4186 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004187 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004188 .ivsize = CTR_RFC3686_IV_SIZE,
4189 .maxauthsize = SHA224_DIGEST_SIZE,
4190 },
4191 .caam = {
4192 .class1_alg_type = OP_ALG_ALGSEL_AES |
4193 OP_ALG_AAI_CTR_MOD128,
4194 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4195 OP_ALG_AAI_HMAC_PRECOMP,
4196 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4197 .rfc3686 = true,
4198 .geniv = true,
4199 },
4200 },
4201 {
4202 .aead = {
4203 .base = {
4204 .cra_name = "authenc(hmac(sha256),"
4205 "rfc3686(ctr(aes)))",
4206 .cra_driver_name = "authenc-hmac-sha256-"
4207 "rfc3686-ctr-aes-caam",
4208 .cra_blocksize = 1,
4209 },
4210 .setkey = aead_setkey,
4211 .setauthsize = aead_setauthsize,
4212 .encrypt = aead_encrypt,
4213 .decrypt = aead_decrypt,
4214 .ivsize = CTR_RFC3686_IV_SIZE,
4215 .maxauthsize = SHA256_DIGEST_SIZE,
4216 },
4217 .caam = {
4218 .class1_alg_type = OP_ALG_ALGSEL_AES |
4219 OP_ALG_AAI_CTR_MOD128,
4220 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4221 OP_ALG_AAI_HMAC_PRECOMP,
4222 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4223 .rfc3686 = true,
4224 },
4225 },
4226 {
4227 .aead = {
4228 .base = {
4229 .cra_name = "seqiv(authenc(hmac(sha256),"
4230 "rfc3686(ctr(aes))))",
4231 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
4232 "rfc3686-ctr-aes-caam",
4233 .cra_blocksize = 1,
4234 },
4235 .setkey = aead_setkey,
4236 .setauthsize = aead_setauthsize,
4237 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004238 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004239 .ivsize = CTR_RFC3686_IV_SIZE,
4240 .maxauthsize = SHA256_DIGEST_SIZE,
4241 },
4242 .caam = {
4243 .class1_alg_type = OP_ALG_ALGSEL_AES |
4244 OP_ALG_AAI_CTR_MOD128,
4245 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4246 OP_ALG_AAI_HMAC_PRECOMP,
4247 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4248 .rfc3686 = true,
4249 .geniv = true,
4250 },
4251 },
4252 {
4253 .aead = {
4254 .base = {
4255 .cra_name = "authenc(hmac(sha384),"
4256 "rfc3686(ctr(aes)))",
4257 .cra_driver_name = "authenc-hmac-sha384-"
4258 "rfc3686-ctr-aes-caam",
4259 .cra_blocksize = 1,
4260 },
4261 .setkey = aead_setkey,
4262 .setauthsize = aead_setauthsize,
4263 .encrypt = aead_encrypt,
4264 .decrypt = aead_decrypt,
4265 .ivsize = CTR_RFC3686_IV_SIZE,
4266 .maxauthsize = SHA384_DIGEST_SIZE,
4267 },
4268 .caam = {
4269 .class1_alg_type = OP_ALG_ALGSEL_AES |
4270 OP_ALG_AAI_CTR_MOD128,
4271 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4272 OP_ALG_AAI_HMAC_PRECOMP,
4273 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4274 .rfc3686 = true,
4275 },
4276 },
4277 {
4278 .aead = {
4279 .base = {
4280 .cra_name = "seqiv(authenc(hmac(sha384),"
4281 "rfc3686(ctr(aes))))",
4282 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
4283 "rfc3686-ctr-aes-caam",
4284 .cra_blocksize = 1,
4285 },
4286 .setkey = aead_setkey,
4287 .setauthsize = aead_setauthsize,
4288 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004289 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004290 .ivsize = CTR_RFC3686_IV_SIZE,
4291 .maxauthsize = SHA384_DIGEST_SIZE,
4292 },
4293 .caam = {
4294 .class1_alg_type = OP_ALG_ALGSEL_AES |
4295 OP_ALG_AAI_CTR_MOD128,
4296 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4297 OP_ALG_AAI_HMAC_PRECOMP,
4298 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4299 .rfc3686 = true,
4300 .geniv = true,
4301 },
4302 },
4303 {
4304 .aead = {
4305 .base = {
4306 .cra_name = "authenc(hmac(sha512),"
4307 "rfc3686(ctr(aes)))",
4308 .cra_driver_name = "authenc-hmac-sha512-"
4309 "rfc3686-ctr-aes-caam",
4310 .cra_blocksize = 1,
4311 },
4312 .setkey = aead_setkey,
4313 .setauthsize = aead_setauthsize,
4314 .encrypt = aead_encrypt,
4315 .decrypt = aead_decrypt,
4316 .ivsize = CTR_RFC3686_IV_SIZE,
4317 .maxauthsize = SHA512_DIGEST_SIZE,
4318 },
4319 .caam = {
4320 .class1_alg_type = OP_ALG_ALGSEL_AES |
4321 OP_ALG_AAI_CTR_MOD128,
4322 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4323 OP_ALG_AAI_HMAC_PRECOMP,
4324 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4325 .rfc3686 = true,
4326 },
4327 },
4328 {
4329 .aead = {
4330 .base = {
4331 .cra_name = "seqiv(authenc(hmac(sha512),"
4332 "rfc3686(ctr(aes))))",
4333 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
4334 "rfc3686-ctr-aes-caam",
4335 .cra_blocksize = 1,
4336 },
4337 .setkey = aead_setkey,
4338 .setauthsize = aead_setauthsize,
4339 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004340 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004341 .ivsize = CTR_RFC3686_IV_SIZE,
4342 .maxauthsize = SHA512_DIGEST_SIZE,
4343 },
4344 .caam = {
4345 .class1_alg_type = OP_ALG_ALGSEL_AES |
4346 OP_ALG_AAI_CTR_MOD128,
4347 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4348 OP_ALG_AAI_HMAC_PRECOMP,
4349 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4350 .rfc3686 = true,
4351 .geniv = true,
4352 },
4353 },
Herbert Xuf2147b82015-06-16 13:54:23 +08004354};
4355
4356struct caam_crypto_alg {
4357 struct crypto_alg crypto_alg;
4358 struct list_head entry;
4359 struct caam_alg_entry caam;
4360};
4361
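/*
 * Common transform init: allocate a job ring for this tfm and cache
 * the descriptor header template values from the algorithm entry.
 */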
4362static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4363{
4364 ctx->jrdev = caam_jr_alloc();
4365 if (IS_ERR(ctx->jrdev)) {
4366 pr_err("Job Ring Device allocation for transform failed\n");
4367 return PTR_ERR(ctx->jrdev);
4368 }
4369
4370 /* copy descriptor header template value */
4371 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4372 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4373 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4374
4375 return 0;
4376}
4377
Kim Phillips8e8ec592011-03-13 16:54:26 +08004378static int caam_cra_init(struct crypto_tfm *tfm)
4379{
4380 struct crypto_alg *alg = tfm->__crt_alg;
4381 struct caam_crypto_alg *caam_alg =
4382 container_of(alg, struct caam_crypto_alg, crypto_alg);
4383 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004384
Herbert Xuf2147b82015-06-16 13:54:23 +08004385 return caam_init_common(ctx, &caam_alg->caam);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004386}
4387
Herbert Xuf2147b82015-06-16 13:54:23 +08004388static int caam_aead_init(struct crypto_aead *tfm)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004389{
Herbert Xuf2147b82015-06-16 13:54:23 +08004390 struct aead_alg *alg = crypto_aead_alg(tfm);
4391 struct caam_aead_alg *caam_alg =
4392 container_of(alg, struct caam_aead_alg, aead);
4393 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004394
Herbert Xuf2147b82015-06-16 13:54:23 +08004395 return caam_init_common(ctx, &caam_alg->caam);
4396}
4397
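/*
 * Common transform teardown: unmap any cached shared descriptors and
 * the key, then release the job ring.
 */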
4398static void caam_exit_common(struct caam_ctx *ctx)
4399{
Yuan Kang1acebad2011-07-15 11:21:42 +08004400 if (ctx->sh_desc_enc_dma &&
4401 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4402 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4403 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4404 if (ctx->sh_desc_dec_dma &&
4405 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4406 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4407 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4408 if (ctx->sh_desc_givenc_dma &&
4409 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4410 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4411 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05004412 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02004413 if (ctx->key_dma &&
4414 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4415 dma_unmap_single(ctx->jrdev, ctx->key_dma,
4416 ctx->enckeylen + ctx->split_key_pad_len,
4417 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304418
4419 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004420}
4421
Herbert Xuf2147b82015-06-16 13:54:23 +08004422static void caam_cra_exit(struct crypto_tfm *tfm)
4423{
4424 caam_exit_common(crypto_tfm_ctx(tfm));
4425}
4426
4427static void caam_aead_exit(struct crypto_aead *tfm)
4428{
4429 caam_exit_common(crypto_aead_ctx(tfm));
4430}
4431
Kim Phillips8e8ec592011-03-13 16:54:26 +08004432static void __exit caam_algapi_exit(void)
4433{
Kim Phillips8e8ec592011-03-13 16:54:26 +08004435 struct caam_crypto_alg *t_alg, *n;
Herbert Xuf2147b82015-06-16 13:54:23 +08004436 int i;
4437
4438 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4439 struct caam_aead_alg *t_alg = driver_aeads + i;
4440
4441 if (t_alg->registered)
4442 crypto_unregister_aead(&t_alg->aead);
4443 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004444
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304445 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004446 return;
4447
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304448 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08004449 crypto_unregister_alg(&t_alg->crypto_alg);
4450 list_del(&t_alg->entry);
4451 kfree(t_alg);
4452 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004453}
4454
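/*
 * Allocate a caam_crypto_alg and fill its crypto_alg from the given
 * template: names, block size, type-specific ops and the descriptor
 * header values the descriptor-construction code will need.
 */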
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304455static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08004456 *template)
4457{
4458 struct caam_crypto_alg *t_alg;
4459 struct crypto_alg *alg;
4460
Fabio Estevam9c4f9732015-08-21 13:52:00 -03004461 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004462 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304463 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08004464 return ERR_PTR(-ENOMEM);
4465 }
4466
4467 alg = &t_alg->crypto_alg;
4468
4469 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4470 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4471 template->driver_name);
4472 alg->cra_module = THIS_MODULE;
4473 alg->cra_init = caam_cra_init;
4474 alg->cra_exit = caam_cra_exit;
4475 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004476 alg->cra_blocksize = template->blocksize;
4477 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004478 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01004479 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4480 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08004481 switch (template->type) {
Catalin Vasile7222d1a2014-10-31 12:45:38 +02004482 case CRYPTO_ALG_TYPE_GIVCIPHER:
4483 alg->cra_type = &crypto_givcipher_type;
4484 alg->cra_ablkcipher = template->template_ablkcipher;
4485 break;
Yuan Kangacdca312011-07-15 11:21:42 +08004486 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4487 alg->cra_type = &crypto_ablkcipher_type;
4488 alg->cra_ablkcipher = template->template_ablkcipher;
4489 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08004490 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004491
Herbert Xuf2147b82015-06-16 13:54:23 +08004492 t_alg->caam.class1_alg_type = template->class1_alg_type;
4493 t_alg->caam.class2_alg_type = template->class2_alg_type;
4494 t_alg->caam.alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004495
4496 return t_alg;
4497}
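
/*
 * For reference, a minimal illustrative sketch of the template shape that
 * caam_alg_alloc() consumes, kept under #if 0 so it is never compiled. The
 * concrete values and the ablkcipher_setkey()/ablkcipher_encrypt()/
 * ablkcipher_decrypt() handler names are assumptions for illustration only;
 * the driver's real entries live in the driver_algs table earlier in this
 * file.
 */
#if 0
static struct caam_alg_template example_cbc_aes_template = {
	.name = "cbc(aes)",
	.driver_name = "cbc-aes-caam",
	.blocksize = AES_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.template_ablkcipher = {
		.setkey = ablkcipher_setkey,
		.encrypt = ablkcipher_encrypt,
		.decrypt = ablkcipher_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	},
	/* class 1 (cipher) CHA: AES in CBC mode */
	.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
};
#endif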

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
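
/*
 * Likewise, a hedged sketch of a driver_aeads entry: the per-algorithm
 * fields are supplied statically, and caam_aead_alg_init() fills in the
 * common base fields above just before registration. Illustrative only
 * (hence #if 0); the aead_setkey()/aead_setauthsize()/aead_encrypt()/
 * aead_decrypt() names are assumed to be the shared AEAD entry points
 * defined earlier in this file.
 */
#if 0
static struct caam_aead_alg example_aead_alg = {
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-caam",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.caam = {
		/* class 1 (cipher) and class 2 (authentication) CHAs */
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
};
#endif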

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
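	/*
	 * Note: cha_id_ls and cha_num_ls are the low halves of the CHA
	 * version and CHA instantiation registers in the controller's
	 * perfmon block. The masked, shifted fields above therefore yield a
	 * per-accelerator version (LP vs. HP, etc.) and instance count,
	 * which is what the skip logic below keys off.
	 */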

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
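
/*
 * A hypothetical usage sketch (not part of this driver, hence #if 0): how a
 * kernel consumer might request one of the AEADs this module registers. The
 * function name example_use_caam_aead() is assumed for illustration; the
 * crypto core resolves the algorithm name to this driver when its
 * CAAM_CRA_PRIORITY wins over other implementations.
 */
#if 0
static int example_use_caam_aead(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set key, set authsize, and issue aead_request's here ... */

	crypto_free_aead(tfm);
	return 0;
}
#endif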

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");