/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3 |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
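
/*
 * For illustration only: a job descriptor with the layout above could be
 * built with the desc_constr.h helpers roughly as follows. This is a
 * sketch with placeholder variables (sh_desc, sh_desc_dma, dst_dma,
 * src_dma, out_len, in_len, options), not code emitted at this point:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 */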

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
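
/*
 * Sizing note (informational): CAAM_CMD_SZ is one 4-byte command word and
 * a descriptor buffer holds at most 64 words (CAAM_DESC_BYTES_MAX). The
 * *_LEN constants above estimate each shared descriptor's command text so
 * the set_sh_desc routines below can decide, per algorithm, whether keys
 * can be inlined into the shared descriptor or must instead be referenced
 * by DMA address (see the keys_fit_inline checks).
 */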

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii,
			bool may_sleep)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	/* advance via sg_next(it), not sg_next(sg), so the walk progresses */
	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif
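
/*
 * Example use (debug builds only; request fields are illustrative):
 *
 *	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
 *		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 *		    req->assoclen + req->cryptlen, true, false);
 */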

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
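
/*
 * In effect, the fragment emitted above is:
 *
 *	if (descriptor is shared)
 *		goto dk;
 *	OPERATION(decrypt);
 *	goto done;
 * dk:	OPERATION(decrypt | OP_ALG_AAI_DK);
 * done:
 *
 * i.e. the Decrypt Key (DK) bit is only set on the shared path, where the
 * class 1 key register was already loaded by a previous job.
 */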

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
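
/*
 * Layout of ctx->key (see append_key_aead() below):
 *
 *	+0			MDHA split authentication key, padded out
 *				to split_key_pad_len
 *	+split_key_pad_len	encryption key; for RFC3686 the 4-byte nonce
 *				follows it and is counted in enckeylen
 */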

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
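
/*
 * Note: the "null" descriptors above back the authentication-only
 * authenc(hmac(*),ecb(cipher_null)) algorithms registered elsewhere in
 * this driver: data passes through unencrypted while the ICV is
 * computed (encrypt) or checked (decrypt).
 */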

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}
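
/*
 * Note that for IV-generating (geniv) algorithms the plain encrypt section
 * above is skipped and the givencrypt descriptor is written into
 * ctx->sh_desc_enc instead, so each transform keeps exactly one
 * encrypt-side shared descriptor.
 */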

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
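
/*
 * Background note: the "split key" generated above is, to our
 * understanding, the MDHA-precomputed HMAC IPAD/OPAD state for the
 * authentication key - hence twice the digest size (see the mdpadlen[]
 * table in aead_setkey() below). The computation itself is done by
 * gen_split_key() in key_gen.c, not in this file.
 */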
1332
Yuan Kang0e479302011-07-15 11:21:41 +08001333static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001334 const u8 *key, unsigned int keylen)
1335{
1336 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1337 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1338 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1339 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001340 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001341 int ret = 0;
1342
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001343 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001344 goto badkey;
1345
1346 /* Pick class 2 key length from algorithm submask */
1347 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1348 OP_ALG_ALGSEL_SHIFT] * 2;
1349 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1350
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001351 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1352 goto badkey;
1353
Kim Phillips8e8ec592011-03-13 16:54:26 +08001354#ifdef DEBUG
1355 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001356 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1357 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001358 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1359 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001360 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001361 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1362#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001363
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001364 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001365 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001366 goto badkey;
1367 }
1368
1369 /* postpend encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001370 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001371
Yuan Kang885e9e22011-07-15 11:21:41 +08001372 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001373 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001374 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001375 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001376 return -ENOMEM;
1377 }
1378#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001379 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001380 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001381 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001382#endif
1383
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001384 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001385
Yuan Kang1acebad2011-07-15 11:21:42 +08001386 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001387 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001388 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001389 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001390 }
1391
1392 return ret;
1393badkey:
1394 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1395 return -EINVAL;
1396}
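
/*
 * Resulting ctx->key layout for authenc algorithms:
 *
 *   | MDHA split key, padded (split_key_pad_len) | enc key (enckeylen) |
 */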

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
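
/*
 * With the salt split off, the RFC4106/RFC4543 descriptors form the GCM
 * nonce per packet as salt (the trailing 4 key bytes) followed by the
 * 8-byte explicit IV carried in the request.
 */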

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
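
	/*
	 * Sketch of the encrypt descriptor just built: shared header,
	 * SHRD jump over the key commands, class 1 key (plus RFC3686
	 * nonce), IV load (plus RFC3686 counter), OPERATION, then the
	 * VLF seq FIFO load/store appended by ablkcipher_append_src_dst().
	 */
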
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
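
	/*
	 * The NFIFO entry above requests crt->ivsize bytes from the RNG
	 * based pad engine; with the info FIFO disabled they are MOVEd
	 * from the input FIFO into CONTEXT1 to seed the generated IV.
	 */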

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}
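
	/*
	 * XTS takes two same-size AES keys: one for the data units and
	 * one for tweak computation, hence the 2 * AES key size check
	 * above. The data-unit (sector) size is hard-coded to 512 bytes
	 * just below.
	 */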

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * If the hw ICV check failed, report -EBADMSG instead of the raw
	 * job ring status.
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
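
/*
 * Sketch of the job descriptor built above: a header pointing at the
 * shared descriptor, SEQ IN PTR covering assoclen + cryptlen bytes,
 * SEQ OUT PTR sized +/- authsize depending on direction, and a MATH
 * immediate seeding REG3 with assoclen for the shared descriptor.
 */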

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
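
/*
 * The FIFO LOAD appended above always feeds 12 bytes of IV to class 1:
 * either the full 12-byte IV of generic GCM, or the 4-byte salt stored
 * after the AES key plus the 8-byte explicit IV of the RFC4106/RFC4543
 * modes.
 */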

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}
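
	/*
	 * all_contig means the source is a single DMA segment, so the
	 * job's SEQ IN pointer can reference it directly; otherwise one
	 * sec4 S/G entry is needed per segment.
	 */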

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

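/*
 * For RFC4106 the AAD is the ESP header: 8 bytes of SPI plus sequence
 * number, or 12 bytes with extended sequence numbers, hence the
 * minimum assoclen enforced below.
 */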
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1, may_sleep);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
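
	/*
	 * iv_contig holds when the mapped IV ends exactly at the bus
	 * address of the single source segment, so IV + payload can be
	 * described by one contiguous SEQ IN pointer.
	 */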
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
2692
2693static int ablkcipher_encrypt(struct ablkcipher_request *req)
2694{
2695 struct ablkcipher_edesc *edesc;
2696 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2697 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2698 struct device *jrdev = ctx->jrdev;
2699 bool iv_contig;
2700 u32 *desc;
2701 int ret = 0;
2702
2703 /* allocate extended descriptor */
2704 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2705 CAAM_CMD_SZ, &iv_contig);
2706 if (IS_ERR(edesc))
2707 return PTR_ERR(edesc);
2708
2709 /* Create and submit job descriptor*/
2710 init_ablkcipher_job(ctx->sh_desc_enc,
2711 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2712#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002713 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002714 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2715 desc_bytes(edesc->hw_desc), 1);
2716#endif
2717 desc = edesc->hw_desc;
2718 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2719
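	/*
	 * caam_jr_enqueue() returns 0 once the job has been accepted by
	 * the ring; completion is then reported asynchronously through
	 * ablkcipher_encrypt_done(), so -EINPROGRESS is returned to the
	 * caller.  On submission failure, undo the mappings and free
	 * the extended descriptor.
	 */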
2720 if (!ret) {
2721 ret = -EINPROGRESS;
2722 } else {
2723 ablkcipher_unmap(jrdev, edesc, req);
2724 kfree(edesc);
2725 }
2726
2727 return ret;
2728}
2729
2730static int ablkcipher_decrypt(struct ablkcipher_request *req)
2731{
2732 struct ablkcipher_edesc *edesc;
2733 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2734 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2735 struct device *jrdev = ctx->jrdev;
2736 bool iv_contig;
2737 u32 *desc;
2738 int ret = 0;
2739
2740 /* allocate extended descriptor */
2741 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2742 CAAM_CMD_SZ, &iv_contig);
2743 if (IS_ERR(edesc))
2744 return PTR_ERR(edesc);
2745
	/* Create and submit job descriptor */
2747 init_ablkcipher_job(ctx->sh_desc_dec,
2748 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2749 desc = edesc->hw_desc;
2750#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002751 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002752 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2753 desc_bytes(edesc->hw_desc), 1);
2754#endif
2755
2756 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2757 if (!ret) {
2758 ret = -EINPROGRESS;
2759 } else {
2760 ablkcipher_unmap(jrdev, edesc, req);
2761 kfree(edesc);
2762 }
2763
2764 return ret;
2765}
2766
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002767/*
2768 * allocate and map the ablkcipher extended descriptor
2769 * for ablkcipher givencrypt
2770 */
2771static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2772 struct skcipher_givcrypt_request *greq,
2773 int desc_bytes,
2774 bool *iv_contig_out)
2775{
2776 struct ablkcipher_request *req = &greq->creq;
2777 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2778 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2779 struct device *jrdev = ctx->jrdev;
2780 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2781 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2782 GFP_KERNEL : GFP_ATOMIC;
2783 int src_nents, dst_nents = 0, sec4_sg_bytes;
2784 struct ablkcipher_edesc *edesc;
2785 dma_addr_t iv_dma = 0;
2786 bool iv_contig = false;
2787 int sgc;
2788 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002789 int sec4_sg_index;
2790
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002791 src_nents = sg_count(req->src, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002792
2793 if (unlikely(req->dst != req->src))
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002794 dst_nents = sg_count(req->dst, req->nbytes);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002795
2796 if (likely(req->src == req->dst)) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002797 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2798 DMA_BIDIRECTIONAL);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002799 } else {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02002800 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2801 DMA_TO_DEVICE);
2802 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2803 DMA_FROM_DEVICE);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002804 }
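	/*
	 * In-place requests share a single bidirectional mapping;
	 * otherwise source and destination are mapped separately.
	 */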
2805
	/*
	 * Map the IV that will be generated and check if it sits
	 * contiguously in front of the (single segment) destination.
	 * If so, it can be used in place; if not, it gets its own
	 * entry in the link table.
	 */
2810 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2811 if (dma_mapping_error(jrdev, iv_dma)) {
2812 dev_err(jrdev, "unable to map IV\n");
2813 return ERR_PTR(-ENOMEM);
2814 }
2815
2816 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2817 iv_contig = true;
2818 else
2819 dst_nents = dst_nents ? : 1;
2820 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2821 sizeof(struct sec4_sg_entry);
2822
2823 /* allocate space for base edesc and hw desc commands, link tables */
Victoria Milhoandde20ae2015-08-05 11:28:39 -07002824 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2825 GFP_DMA | flags);
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002826 if (!edesc) {
2827 dev_err(jrdev, "could not allocate extended descriptor\n");
2828 return ERR_PTR(-ENOMEM);
2829 }
2830
2831 edesc->src_nents = src_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002832 edesc->dst_nents = dst_nents;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002833 edesc->sec4_sg_bytes = sec4_sg_bytes;
2834 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2835 desc_bytes;
2836
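	/*
	 * Build the link table: source segments first, then (when the
	 * IV is not contiguous with the destination) the IV slot
	 * followed by the destination segments.
	 */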
2837 sec4_sg_index = 0;
2838 if (src_nents) {
2839 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2840 sec4_sg_index += src_nents;
2841 }
2842
2843 if (!iv_contig) {
2844 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2845 iv_dma, ivsize, 0);
2846 sec4_sg_index += 1;
2847 sg_to_sec4_sg_last(req->dst, dst_nents,
2848 edesc->sec4_sg + sec4_sg_index, 0);
2849 }
2850
2851 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2852 sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
2857 edesc->iv_dma = iv_dma;
2858
2859#ifdef DEBUG
2860 print_hex_dump(KERN_ERR,
2861 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2862 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2863 sec4_sg_bytes, 1);
2864#endif
2865
2866 *iv_contig_out = iv_contig;
2867 return edesc;
2868}
2869
2870static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2871{
2872 struct ablkcipher_request *req = &creq->creq;
2873 struct ablkcipher_edesc *edesc;
2874 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2875 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2876 struct device *jrdev = ctx->jrdev;
2877 bool iv_contig;
2878 u32 *desc;
2879 int ret = 0;
2880
2881 /* allocate extended descriptor */
2882 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2883 CAAM_CMD_SZ, &iv_contig);
2884 if (IS_ERR(edesc))
2885 return PTR_ERR(edesc);
2886
	/* Create and submit job descriptor */
2888 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2889 edesc, req, iv_contig);
2890#ifdef DEBUG
2891 print_hex_dump(KERN_ERR,
2892 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2893 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2894 desc_bytes(edesc->hw_desc), 1);
2895#endif
2896 desc = edesc->hw_desc;
2897 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2898
2899 if (!ret) {
2900 ret = -EINPROGRESS;
2901 } else {
2902 ablkcipher_unmap(jrdev, edesc, req);
2903 kfree(edesc);
2904 }
2905
2906 return ret;
2907}
2908
Yuan Kang885e9e22011-07-15 11:21:41 +08002909#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002910#define template_ablkcipher template_u.ablkcipher
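/*
 * caam_alg_template - describes one algorithm to be registered at
 * module init.  @type selects which member of @template_u is valid;
 * the class1/class2/alg_op fields carry the CAAM operation header
 * values for the cipher and authentication blocks.
 */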
Kim Phillips8e8ec592011-03-13 16:54:26 +08002911struct caam_alg_template {
2912 char name[CRYPTO_MAX_ALG_NAME];
2913 char driver_name[CRYPTO_MAX_ALG_NAME];
2914 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002915 u32 type;
2916 union {
2917 struct ablkcipher_alg ablkcipher;
Yuan Kang885e9e22011-07-15 11:21:41 +08002918 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002919 u32 class1_alg_type;
2920 u32 class2_alg_type;
2921 u32 alg_op;
2922};
2923
2924static struct caam_alg_template driver_algs[] = {
Yuan Kangacdca312011-07-15 11:21:42 +08002925 /* ablkcipher descriptor */
2926 {
2927 .name = "cbc(aes)",
2928 .driver_name = "cbc-aes-caam",
2929 .blocksize = AES_BLOCK_SIZE,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002930 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002931 .template_ablkcipher = {
2932 .setkey = ablkcipher_setkey,
2933 .encrypt = ablkcipher_encrypt,
2934 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002935 .givencrypt = ablkcipher_givencrypt,
2936 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002937 .min_keysize = AES_MIN_KEY_SIZE,
2938 .max_keysize = AES_MAX_KEY_SIZE,
2939 .ivsize = AES_BLOCK_SIZE,
2940 },
2941 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2942 },
2943 {
2944 .name = "cbc(des3_ede)",
2945 .driver_name = "cbc-3des-caam",
2946 .blocksize = DES3_EDE_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002947 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002948 .template_ablkcipher = {
2949 .setkey = ablkcipher_setkey,
2950 .encrypt = ablkcipher_encrypt,
2951 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002952 .givencrypt = ablkcipher_givencrypt,
2953 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002954 .min_keysize = DES3_EDE_KEY_SIZE,
2955 .max_keysize = DES3_EDE_KEY_SIZE,
2956 .ivsize = DES3_EDE_BLOCK_SIZE,
2957 },
2958 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2959 },
2960 {
2961 .name = "cbc(des)",
2962 .driver_name = "cbc-des-caam",
2963 .blocksize = DES_BLOCK_SIZE,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002964 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Yuan Kangacdca312011-07-15 11:21:42 +08002965 .template_ablkcipher = {
2966 .setkey = ablkcipher_setkey,
2967 .encrypt = ablkcipher_encrypt,
2968 .decrypt = ablkcipher_decrypt,
Catalin Vasileff2c3a32014-11-11 16:18:13 +02002969 .givencrypt = ablkcipher_givencrypt,
2970 .geniv = "<built-in>",
Yuan Kangacdca312011-07-15 11:21:42 +08002971 .min_keysize = DES_KEY_SIZE,
2972 .max_keysize = DES_KEY_SIZE,
2973 .ivsize = DES_BLOCK_SIZE,
2974 },
2975 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02002976 },
2977 {
2978 .name = "ctr(aes)",
2979 .driver_name = "ctr-aes-caam",
2980 .blocksize = 1,
2981 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2982 .template_ablkcipher = {
2983 .setkey = ablkcipher_setkey,
2984 .encrypt = ablkcipher_encrypt,
2985 .decrypt = ablkcipher_decrypt,
2986 .geniv = "chainiv",
2987 .min_keysize = AES_MIN_KEY_SIZE,
2988 .max_keysize = AES_MAX_KEY_SIZE,
2989 .ivsize = AES_BLOCK_SIZE,
2990 },
2991 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002992 },
2993 {
2994 .name = "rfc3686(ctr(aes))",
2995 .driver_name = "rfc3686-ctr-aes-caam",
2996 .blocksize = 1,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002997 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02002998 .template_ablkcipher = {
2999 .setkey = ablkcipher_setkey,
3000 .encrypt = ablkcipher_encrypt,
3001 .decrypt = ablkcipher_decrypt,
Catalin Vasile7222d1a2014-10-31 12:45:38 +02003002 .givencrypt = ablkcipher_givencrypt,
3003 .geniv = "<built-in>",
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02003004 .min_keysize = AES_MIN_KEY_SIZE +
3005 CTR_RFC3686_NONCE_SIZE,
3006 .max_keysize = AES_MAX_KEY_SIZE +
3007 CTR_RFC3686_NONCE_SIZE,
3008 .ivsize = CTR_RFC3686_IV_SIZE,
3009 },
3010 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilec6415a62015-10-02 13:13:18 +03003011 },
3012 {
3013 .name = "xts(aes)",
3014 .driver_name = "xts-aes-caam",
3015 .blocksize = AES_BLOCK_SIZE,
3016 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3017 .template_ablkcipher = {
3018 .setkey = xts_ablkcipher_setkey,
3019 .encrypt = ablkcipher_encrypt,
3020 .decrypt = ablkcipher_decrypt,
3021 .geniv = "eseqiv",
3022 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3023 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3024 .ivsize = AES_BLOCK_SIZE,
3025 },
3026 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
3027 },
Kim Phillips8e8ec592011-03-13 16:54:26 +08003028};
3029
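/*
 * AEAD algorithms are registered through the aead_alg interface; each
 * entry pairs the generic crypto template with the CAAM-specific
 * descriptor header values kept in .caam.
 */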
Herbert Xuf2147b82015-06-16 13:54:23 +08003030static struct caam_aead_alg driver_aeads[] = {
3031 {
3032 .aead = {
3033 .base = {
3034 .cra_name = "rfc4106(gcm(aes))",
3035 .cra_driver_name = "rfc4106-gcm-aes-caam",
3036 .cra_blocksize = 1,
3037 },
3038 .setkey = rfc4106_setkey,
3039 .setauthsize = rfc4106_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08003040 .encrypt = ipsec_gcm_encrypt,
3041 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003042 .ivsize = 8,
3043 .maxauthsize = AES_BLOCK_SIZE,
3044 },
3045 .caam = {
3046 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3047 },
3048 },
3049 {
3050 .aead = {
3051 .base = {
3052 .cra_name = "rfc4543(gcm(aes))",
3053 .cra_driver_name = "rfc4543-gcm-aes-caam",
3054 .cra_blocksize = 1,
3055 },
3056 .setkey = rfc4543_setkey,
3057 .setauthsize = rfc4543_setauthsize,
Herbert Xu46218752015-07-09 07:17:33 +08003058 .encrypt = ipsec_gcm_encrypt,
3059 .decrypt = ipsec_gcm_decrypt,
Herbert Xuf2147b82015-06-16 13:54:23 +08003060 .ivsize = 8,
3061 .maxauthsize = AES_BLOCK_SIZE,
3062 },
3063 .caam = {
3064 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3065 },
3066 },
3067 /* Galois Counter Mode */
3068 {
3069 .aead = {
3070 .base = {
3071 .cra_name = "gcm(aes)",
3072 .cra_driver_name = "gcm-aes-caam",
3073 .cra_blocksize = 1,
3074 },
3075 .setkey = gcm_setkey,
3076 .setauthsize = gcm_setauthsize,
3077 .encrypt = gcm_encrypt,
3078 .decrypt = gcm_decrypt,
3079 .ivsize = 12,
3080 .maxauthsize = AES_BLOCK_SIZE,
3081 },
3082 .caam = {
3083 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3084 },
3085 },
Herbert Xu479bcc72015-07-30 17:53:17 +08003086 /* single-pass ipsec_esp descriptor */
3087 {
3088 .aead = {
3089 .base = {
3090 .cra_name = "authenc(hmac(md5),"
3091 "ecb(cipher_null))",
3092 .cra_driver_name = "authenc-hmac-md5-"
3093 "ecb-cipher_null-caam",
3094 .cra_blocksize = NULL_BLOCK_SIZE,
3095 },
3096 .setkey = aead_setkey,
3097 .setauthsize = aead_setauthsize,
3098 .encrypt = aead_encrypt,
3099 .decrypt = aead_decrypt,
3100 .ivsize = NULL_IV_SIZE,
3101 .maxauthsize = MD5_DIGEST_SIZE,
3102 },
3103 .caam = {
3104 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3105 OP_ALG_AAI_HMAC_PRECOMP,
3106 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3107 },
3108 },
3109 {
3110 .aead = {
3111 .base = {
3112 .cra_name = "authenc(hmac(sha1),"
3113 "ecb(cipher_null))",
3114 .cra_driver_name = "authenc-hmac-sha1-"
3115 "ecb-cipher_null-caam",
3116 .cra_blocksize = NULL_BLOCK_SIZE,
3117 },
3118 .setkey = aead_setkey,
3119 .setauthsize = aead_setauthsize,
3120 .encrypt = aead_encrypt,
3121 .decrypt = aead_decrypt,
3122 .ivsize = NULL_IV_SIZE,
3123 .maxauthsize = SHA1_DIGEST_SIZE,
3124 },
3125 .caam = {
3126 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3127 OP_ALG_AAI_HMAC_PRECOMP,
3128 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3129 },
3130 },
3131 {
3132 .aead = {
3133 .base = {
3134 .cra_name = "authenc(hmac(sha224),"
3135 "ecb(cipher_null))",
3136 .cra_driver_name = "authenc-hmac-sha224-"
3137 "ecb-cipher_null-caam",
3138 .cra_blocksize = NULL_BLOCK_SIZE,
3139 },
3140 .setkey = aead_setkey,
3141 .setauthsize = aead_setauthsize,
3142 .encrypt = aead_encrypt,
3143 .decrypt = aead_decrypt,
3144 .ivsize = NULL_IV_SIZE,
3145 .maxauthsize = SHA224_DIGEST_SIZE,
3146 },
3147 .caam = {
3148 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3149 OP_ALG_AAI_HMAC_PRECOMP,
3150 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3151 },
3152 },
3153 {
3154 .aead = {
3155 .base = {
3156 .cra_name = "authenc(hmac(sha256),"
3157 "ecb(cipher_null))",
3158 .cra_driver_name = "authenc-hmac-sha256-"
3159 "ecb-cipher_null-caam",
3160 .cra_blocksize = NULL_BLOCK_SIZE,
3161 },
3162 .setkey = aead_setkey,
3163 .setauthsize = aead_setauthsize,
3164 .encrypt = aead_encrypt,
3165 .decrypt = aead_decrypt,
3166 .ivsize = NULL_IV_SIZE,
3167 .maxauthsize = SHA256_DIGEST_SIZE,
3168 },
3169 .caam = {
3170 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3171 OP_ALG_AAI_HMAC_PRECOMP,
3172 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3173 },
3174 },
3175 {
3176 .aead = {
3177 .base = {
3178 .cra_name = "authenc(hmac(sha384),"
3179 "ecb(cipher_null))",
3180 .cra_driver_name = "authenc-hmac-sha384-"
3181 "ecb-cipher_null-caam",
3182 .cra_blocksize = NULL_BLOCK_SIZE,
3183 },
3184 .setkey = aead_setkey,
3185 .setauthsize = aead_setauthsize,
3186 .encrypt = aead_encrypt,
3187 .decrypt = aead_decrypt,
3188 .ivsize = NULL_IV_SIZE,
3189 .maxauthsize = SHA384_DIGEST_SIZE,
3190 },
3191 .caam = {
3192 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3193 OP_ALG_AAI_HMAC_PRECOMP,
3194 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3195 },
3196 },
3197 {
3198 .aead = {
3199 .base = {
3200 .cra_name = "authenc(hmac(sha512),"
3201 "ecb(cipher_null))",
3202 .cra_driver_name = "authenc-hmac-sha512-"
3203 "ecb-cipher_null-caam",
3204 .cra_blocksize = NULL_BLOCK_SIZE,
3205 },
3206 .setkey = aead_setkey,
3207 .setauthsize = aead_setauthsize,
3208 .encrypt = aead_encrypt,
3209 .decrypt = aead_decrypt,
3210 .ivsize = NULL_IV_SIZE,
3211 .maxauthsize = SHA512_DIGEST_SIZE,
3212 },
3213 .caam = {
3214 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3215 OP_ALG_AAI_HMAC_PRECOMP,
3216 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3217 },
3218 },
3219 {
3220 .aead = {
3221 .base = {
3222 .cra_name = "authenc(hmac(md5),cbc(aes))",
3223 .cra_driver_name = "authenc-hmac-md5-"
3224 "cbc-aes-caam",
3225 .cra_blocksize = AES_BLOCK_SIZE,
3226 },
3227 .setkey = aead_setkey,
3228 .setauthsize = aead_setauthsize,
3229 .encrypt = aead_encrypt,
3230 .decrypt = aead_decrypt,
3231 .ivsize = AES_BLOCK_SIZE,
3232 .maxauthsize = MD5_DIGEST_SIZE,
3233 },
3234 .caam = {
3235 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3236 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3237 OP_ALG_AAI_HMAC_PRECOMP,
3238 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3239 },
3240 },
3241 {
3242 .aead = {
3243 .base = {
3244 .cra_name = "echainiv(authenc(hmac(md5),"
3245 "cbc(aes)))",
3246 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3247 "cbc-aes-caam",
3248 .cra_blocksize = AES_BLOCK_SIZE,
3249 },
3250 .setkey = aead_setkey,
3251 .setauthsize = aead_setauthsize,
3252 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003253 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003254 .ivsize = AES_BLOCK_SIZE,
3255 .maxauthsize = MD5_DIGEST_SIZE,
3256 },
3257 .caam = {
3258 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3259 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3260 OP_ALG_AAI_HMAC_PRECOMP,
3261 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3262 .geniv = true,
3263 },
3264 },
3265 {
3266 .aead = {
3267 .base = {
3268 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3269 .cra_driver_name = "authenc-hmac-sha1-"
3270 "cbc-aes-caam",
3271 .cra_blocksize = AES_BLOCK_SIZE,
3272 },
3273 .setkey = aead_setkey,
3274 .setauthsize = aead_setauthsize,
3275 .encrypt = aead_encrypt,
3276 .decrypt = aead_decrypt,
3277 .ivsize = AES_BLOCK_SIZE,
3278 .maxauthsize = SHA1_DIGEST_SIZE,
3279 },
3280 .caam = {
3281 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3282 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3283 OP_ALG_AAI_HMAC_PRECOMP,
3284 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3285 },
3286 },
3287 {
3288 .aead = {
3289 .base = {
3290 .cra_name = "echainiv(authenc(hmac(sha1),"
3291 "cbc(aes)))",
3292 .cra_driver_name = "echainiv-authenc-"
3293 "hmac-sha1-cbc-aes-caam",
3294 .cra_blocksize = AES_BLOCK_SIZE,
3295 },
3296 .setkey = aead_setkey,
3297 .setauthsize = aead_setauthsize,
3298 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003299 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003300 .ivsize = AES_BLOCK_SIZE,
3301 .maxauthsize = SHA1_DIGEST_SIZE,
3302 },
3303 .caam = {
3304 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3305 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3306 OP_ALG_AAI_HMAC_PRECOMP,
3307 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3308 .geniv = true,
3309 },
3310 },
3311 {
3312 .aead = {
3313 .base = {
3314 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3315 .cra_driver_name = "authenc-hmac-sha224-"
3316 "cbc-aes-caam",
3317 .cra_blocksize = AES_BLOCK_SIZE,
3318 },
3319 .setkey = aead_setkey,
3320 .setauthsize = aead_setauthsize,
3321 .encrypt = aead_encrypt,
3322 .decrypt = aead_decrypt,
3323 .ivsize = AES_BLOCK_SIZE,
3324 .maxauthsize = SHA224_DIGEST_SIZE,
3325 },
3326 .caam = {
3327 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3328 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3329 OP_ALG_AAI_HMAC_PRECOMP,
3330 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3331 },
3332 },
3333 {
3334 .aead = {
3335 .base = {
3336 .cra_name = "echainiv(authenc(hmac(sha224),"
3337 "cbc(aes)))",
3338 .cra_driver_name = "echainiv-authenc-"
3339 "hmac-sha224-cbc-aes-caam",
3340 .cra_blocksize = AES_BLOCK_SIZE,
3341 },
3342 .setkey = aead_setkey,
3343 .setauthsize = aead_setauthsize,
3344 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003345 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003346 .ivsize = AES_BLOCK_SIZE,
3347 .maxauthsize = SHA224_DIGEST_SIZE,
3348 },
3349 .caam = {
3350 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3351 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3352 OP_ALG_AAI_HMAC_PRECOMP,
3353 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3354 .geniv = true,
3355 },
3356 },
3357 {
3358 .aead = {
3359 .base = {
3360 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3361 .cra_driver_name = "authenc-hmac-sha256-"
3362 "cbc-aes-caam",
3363 .cra_blocksize = AES_BLOCK_SIZE,
3364 },
3365 .setkey = aead_setkey,
3366 .setauthsize = aead_setauthsize,
3367 .encrypt = aead_encrypt,
3368 .decrypt = aead_decrypt,
3369 .ivsize = AES_BLOCK_SIZE,
3370 .maxauthsize = SHA256_DIGEST_SIZE,
3371 },
3372 .caam = {
3373 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3374 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3375 OP_ALG_AAI_HMAC_PRECOMP,
3376 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3377 },
3378 },
3379 {
3380 .aead = {
3381 .base = {
3382 .cra_name = "echainiv(authenc(hmac(sha256),"
3383 "cbc(aes)))",
3384 .cra_driver_name = "echainiv-authenc-"
3385 "hmac-sha256-cbc-aes-caam",
3386 .cra_blocksize = AES_BLOCK_SIZE,
3387 },
3388 .setkey = aead_setkey,
3389 .setauthsize = aead_setauthsize,
3390 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003391 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003392 .ivsize = AES_BLOCK_SIZE,
3393 .maxauthsize = SHA256_DIGEST_SIZE,
3394 },
3395 .caam = {
3396 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3397 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3398 OP_ALG_AAI_HMAC_PRECOMP,
3399 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3400 .geniv = true,
3401 },
3402 },
3403 {
3404 .aead = {
3405 .base = {
3406 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3407 .cra_driver_name = "authenc-hmac-sha384-"
3408 "cbc-aes-caam",
3409 .cra_blocksize = AES_BLOCK_SIZE,
3410 },
3411 .setkey = aead_setkey,
3412 .setauthsize = aead_setauthsize,
3413 .encrypt = aead_encrypt,
3414 .decrypt = aead_decrypt,
3415 .ivsize = AES_BLOCK_SIZE,
3416 .maxauthsize = SHA384_DIGEST_SIZE,
3417 },
3418 .caam = {
3419 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3420 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3421 OP_ALG_AAI_HMAC_PRECOMP,
3422 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3423 },
3424 },
3425 {
3426 .aead = {
3427 .base = {
3428 .cra_name = "echainiv(authenc(hmac(sha384),"
3429 "cbc(aes)))",
3430 .cra_driver_name = "echainiv-authenc-"
3431 "hmac-sha384-cbc-aes-caam",
3432 .cra_blocksize = AES_BLOCK_SIZE,
3433 },
3434 .setkey = aead_setkey,
3435 .setauthsize = aead_setauthsize,
3436 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003437 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003438 .ivsize = AES_BLOCK_SIZE,
3439 .maxauthsize = SHA384_DIGEST_SIZE,
3440 },
3441 .caam = {
3442 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3443 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3444 OP_ALG_AAI_HMAC_PRECOMP,
3445 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3446 .geniv = true,
3447 },
3448 },
3449 {
3450 .aead = {
3451 .base = {
3452 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3453 .cra_driver_name = "authenc-hmac-sha512-"
3454 "cbc-aes-caam",
3455 .cra_blocksize = AES_BLOCK_SIZE,
3456 },
3457 .setkey = aead_setkey,
3458 .setauthsize = aead_setauthsize,
3459 .encrypt = aead_encrypt,
3460 .decrypt = aead_decrypt,
3461 .ivsize = AES_BLOCK_SIZE,
3462 .maxauthsize = SHA512_DIGEST_SIZE,
3463 },
3464 .caam = {
3465 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3466 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3467 OP_ALG_AAI_HMAC_PRECOMP,
3468 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3469 },
3470 },
3471 {
3472 .aead = {
3473 .base = {
3474 .cra_name = "echainiv(authenc(hmac(sha512),"
3475 "cbc(aes)))",
3476 .cra_driver_name = "echainiv-authenc-"
3477 "hmac-sha512-cbc-aes-caam",
3478 .cra_blocksize = AES_BLOCK_SIZE,
3479 },
3480 .setkey = aead_setkey,
3481 .setauthsize = aead_setauthsize,
3482 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003483 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003484 .ivsize = AES_BLOCK_SIZE,
3485 .maxauthsize = SHA512_DIGEST_SIZE,
3486 },
3487 .caam = {
3488 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3489 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3490 OP_ALG_AAI_HMAC_PRECOMP,
3491 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3492 .geniv = true,
3493 },
3494 },
3495 {
3496 .aead = {
3497 .base = {
3498 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3499 .cra_driver_name = "authenc-hmac-md5-"
3500 "cbc-des3_ede-caam",
3501 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3502 },
3503 .setkey = aead_setkey,
3504 .setauthsize = aead_setauthsize,
3505 .encrypt = aead_encrypt,
3506 .decrypt = aead_decrypt,
3507 .ivsize = DES3_EDE_BLOCK_SIZE,
3508 .maxauthsize = MD5_DIGEST_SIZE,
3509 },
3510 .caam = {
3511 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3512 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3513 OP_ALG_AAI_HMAC_PRECOMP,
3514 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3515 }
3516 },
3517 {
3518 .aead = {
3519 .base = {
3520 .cra_name = "echainiv(authenc(hmac(md5),"
3521 "cbc(des3_ede)))",
3522 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3523 "cbc-des3_ede-caam",
3524 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3525 },
3526 .setkey = aead_setkey,
3527 .setauthsize = aead_setauthsize,
3528 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003529 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003530 .ivsize = DES3_EDE_BLOCK_SIZE,
3531 .maxauthsize = MD5_DIGEST_SIZE,
3532 },
3533 .caam = {
3534 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3535 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3536 OP_ALG_AAI_HMAC_PRECOMP,
3537 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3538 .geniv = true,
3539 }
3540 },
3541 {
3542 .aead = {
3543 .base = {
3544 .cra_name = "authenc(hmac(sha1),"
3545 "cbc(des3_ede))",
3546 .cra_driver_name = "authenc-hmac-sha1-"
3547 "cbc-des3_ede-caam",
3548 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3549 },
3550 .setkey = aead_setkey,
3551 .setauthsize = aead_setauthsize,
3552 .encrypt = aead_encrypt,
3553 .decrypt = aead_decrypt,
3554 .ivsize = DES3_EDE_BLOCK_SIZE,
3555 .maxauthsize = SHA1_DIGEST_SIZE,
3556 },
3557 .caam = {
3558 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3559 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3560 OP_ALG_AAI_HMAC_PRECOMP,
3561 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3562 },
3563 },
3564 {
3565 .aead = {
3566 .base = {
3567 .cra_name = "echainiv(authenc(hmac(sha1),"
3568 "cbc(des3_ede)))",
3569 .cra_driver_name = "echainiv-authenc-"
3570 "hmac-sha1-"
3571 "cbc-des3_ede-caam",
3572 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3573 },
3574 .setkey = aead_setkey,
3575 .setauthsize = aead_setauthsize,
3576 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003577 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003578 .ivsize = DES3_EDE_BLOCK_SIZE,
3579 .maxauthsize = SHA1_DIGEST_SIZE,
3580 },
3581 .caam = {
3582 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3583 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3584 OP_ALG_AAI_HMAC_PRECOMP,
3585 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3586 .geniv = true,
3587 },
3588 },
3589 {
3590 .aead = {
3591 .base = {
3592 .cra_name = "authenc(hmac(sha224),"
3593 "cbc(des3_ede))",
3594 .cra_driver_name = "authenc-hmac-sha224-"
3595 "cbc-des3_ede-caam",
3596 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3597 },
3598 .setkey = aead_setkey,
3599 .setauthsize = aead_setauthsize,
3600 .encrypt = aead_encrypt,
3601 .decrypt = aead_decrypt,
3602 .ivsize = DES3_EDE_BLOCK_SIZE,
3603 .maxauthsize = SHA224_DIGEST_SIZE,
3604 },
3605 .caam = {
3606 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3607 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3608 OP_ALG_AAI_HMAC_PRECOMP,
3609 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3610 },
3611 },
3612 {
3613 .aead = {
3614 .base = {
3615 .cra_name = "echainiv(authenc(hmac(sha224),"
3616 "cbc(des3_ede)))",
3617 .cra_driver_name = "echainiv-authenc-"
3618 "hmac-sha224-"
3619 "cbc-des3_ede-caam",
3620 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3621 },
3622 .setkey = aead_setkey,
3623 .setauthsize = aead_setauthsize,
3624 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003625 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003626 .ivsize = DES3_EDE_BLOCK_SIZE,
3627 .maxauthsize = SHA224_DIGEST_SIZE,
3628 },
3629 .caam = {
3630 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3631 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3632 OP_ALG_AAI_HMAC_PRECOMP,
3633 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3634 .geniv = true,
3635 },
3636 },
3637 {
3638 .aead = {
3639 .base = {
3640 .cra_name = "authenc(hmac(sha256),"
3641 "cbc(des3_ede))",
3642 .cra_driver_name = "authenc-hmac-sha256-"
3643 "cbc-des3_ede-caam",
3644 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3645 },
3646 .setkey = aead_setkey,
3647 .setauthsize = aead_setauthsize,
3648 .encrypt = aead_encrypt,
3649 .decrypt = aead_decrypt,
3650 .ivsize = DES3_EDE_BLOCK_SIZE,
3651 .maxauthsize = SHA256_DIGEST_SIZE,
3652 },
3653 .caam = {
3654 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3655 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3656 OP_ALG_AAI_HMAC_PRECOMP,
3657 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3658 },
3659 },
3660 {
3661 .aead = {
3662 .base = {
3663 .cra_name = "echainiv(authenc(hmac(sha256),"
3664 "cbc(des3_ede)))",
3665 .cra_driver_name = "echainiv-authenc-"
3666 "hmac-sha256-"
3667 "cbc-des3_ede-caam",
3668 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3669 },
3670 .setkey = aead_setkey,
3671 .setauthsize = aead_setauthsize,
3672 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003673 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003674 .ivsize = DES3_EDE_BLOCK_SIZE,
3675 .maxauthsize = SHA256_DIGEST_SIZE,
3676 },
3677 .caam = {
3678 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3679 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3680 OP_ALG_AAI_HMAC_PRECOMP,
3681 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3682 .geniv = true,
3683 },
3684 },
3685 {
3686 .aead = {
3687 .base = {
3688 .cra_name = "authenc(hmac(sha384),"
3689 "cbc(des3_ede))",
3690 .cra_driver_name = "authenc-hmac-sha384-"
3691 "cbc-des3_ede-caam",
3692 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3693 },
3694 .setkey = aead_setkey,
3695 .setauthsize = aead_setauthsize,
3696 .encrypt = aead_encrypt,
3697 .decrypt = aead_decrypt,
3698 .ivsize = DES3_EDE_BLOCK_SIZE,
3699 .maxauthsize = SHA384_DIGEST_SIZE,
3700 },
3701 .caam = {
3702 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3703 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3704 OP_ALG_AAI_HMAC_PRECOMP,
3705 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3706 },
3707 },
3708 {
3709 .aead = {
3710 .base = {
3711 .cra_name = "echainiv(authenc(hmac(sha384),"
3712 "cbc(des3_ede)))",
3713 .cra_driver_name = "echainiv-authenc-"
3714 "hmac-sha384-"
3715 "cbc-des3_ede-caam",
3716 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3717 },
3718 .setkey = aead_setkey,
3719 .setauthsize = aead_setauthsize,
3720 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003721 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003722 .ivsize = DES3_EDE_BLOCK_SIZE,
3723 .maxauthsize = SHA384_DIGEST_SIZE,
3724 },
3725 .caam = {
3726 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3727 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3728 OP_ALG_AAI_HMAC_PRECOMP,
3729 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3730 .geniv = true,
3731 },
3732 },
3733 {
3734 .aead = {
3735 .base = {
3736 .cra_name = "authenc(hmac(sha512),"
3737 "cbc(des3_ede))",
3738 .cra_driver_name = "authenc-hmac-sha512-"
3739 "cbc-des3_ede-caam",
3740 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3741 },
3742 .setkey = aead_setkey,
3743 .setauthsize = aead_setauthsize,
3744 .encrypt = aead_encrypt,
3745 .decrypt = aead_decrypt,
3746 .ivsize = DES3_EDE_BLOCK_SIZE,
3747 .maxauthsize = SHA512_DIGEST_SIZE,
3748 },
3749 .caam = {
3750 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3751 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3752 OP_ALG_AAI_HMAC_PRECOMP,
3753 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3754 },
3755 },
3756 {
3757 .aead = {
3758 .base = {
3759 .cra_name = "echainiv(authenc(hmac(sha512),"
3760 "cbc(des3_ede)))",
3761 .cra_driver_name = "echainiv-authenc-"
3762 "hmac-sha512-"
3763 "cbc-des3_ede-caam",
3764 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3765 },
3766 .setkey = aead_setkey,
3767 .setauthsize = aead_setauthsize,
3768 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003769 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003770 .ivsize = DES3_EDE_BLOCK_SIZE,
3771 .maxauthsize = SHA512_DIGEST_SIZE,
3772 },
3773 .caam = {
3774 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3775 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3776 OP_ALG_AAI_HMAC_PRECOMP,
3777 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3778 .geniv = true,
3779 },
3780 },
3781 {
3782 .aead = {
3783 .base = {
3784 .cra_name = "authenc(hmac(md5),cbc(des))",
3785 .cra_driver_name = "authenc-hmac-md5-"
3786 "cbc-des-caam",
3787 .cra_blocksize = DES_BLOCK_SIZE,
3788 },
3789 .setkey = aead_setkey,
3790 .setauthsize = aead_setauthsize,
3791 .encrypt = aead_encrypt,
3792 .decrypt = aead_decrypt,
3793 .ivsize = DES_BLOCK_SIZE,
3794 .maxauthsize = MD5_DIGEST_SIZE,
3795 },
3796 .caam = {
3797 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3798 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3799 OP_ALG_AAI_HMAC_PRECOMP,
3800 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3801 },
3802 },
3803 {
3804 .aead = {
3805 .base = {
3806 .cra_name = "echainiv(authenc(hmac(md5),"
3807 "cbc(des)))",
3808 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3809 "cbc-des-caam",
3810 .cra_blocksize = DES_BLOCK_SIZE,
3811 },
3812 .setkey = aead_setkey,
3813 .setauthsize = aead_setauthsize,
3814 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003815 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003816 .ivsize = DES_BLOCK_SIZE,
3817 .maxauthsize = MD5_DIGEST_SIZE,
3818 },
3819 .caam = {
3820 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3821 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3822 OP_ALG_AAI_HMAC_PRECOMP,
3823 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3824 .geniv = true,
3825 },
3826 },
3827 {
3828 .aead = {
3829 .base = {
3830 .cra_name = "authenc(hmac(sha1),cbc(des))",
3831 .cra_driver_name = "authenc-hmac-sha1-"
3832 "cbc-des-caam",
3833 .cra_blocksize = DES_BLOCK_SIZE,
3834 },
3835 .setkey = aead_setkey,
3836 .setauthsize = aead_setauthsize,
3837 .encrypt = aead_encrypt,
3838 .decrypt = aead_decrypt,
3839 .ivsize = DES_BLOCK_SIZE,
3840 .maxauthsize = SHA1_DIGEST_SIZE,
3841 },
3842 .caam = {
3843 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3844 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3845 OP_ALG_AAI_HMAC_PRECOMP,
3846 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3847 },
3848 },
3849 {
3850 .aead = {
3851 .base = {
3852 .cra_name = "echainiv(authenc(hmac(sha1),"
3853 "cbc(des)))",
3854 .cra_driver_name = "echainiv-authenc-"
3855 "hmac-sha1-cbc-des-caam",
3856 .cra_blocksize = DES_BLOCK_SIZE,
3857 },
3858 .setkey = aead_setkey,
3859 .setauthsize = aead_setauthsize,
3860 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003861 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003862 .ivsize = DES_BLOCK_SIZE,
3863 .maxauthsize = SHA1_DIGEST_SIZE,
3864 },
3865 .caam = {
3866 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3867 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3868 OP_ALG_AAI_HMAC_PRECOMP,
3869 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3870 .geniv = true,
3871 },
3872 },
3873 {
3874 .aead = {
3875 .base = {
3876 .cra_name = "authenc(hmac(sha224),cbc(des))",
3877 .cra_driver_name = "authenc-hmac-sha224-"
3878 "cbc-des-caam",
3879 .cra_blocksize = DES_BLOCK_SIZE,
3880 },
3881 .setkey = aead_setkey,
3882 .setauthsize = aead_setauthsize,
3883 .encrypt = aead_encrypt,
3884 .decrypt = aead_decrypt,
3885 .ivsize = DES_BLOCK_SIZE,
3886 .maxauthsize = SHA224_DIGEST_SIZE,
3887 },
3888 .caam = {
3889 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3890 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3891 OP_ALG_AAI_HMAC_PRECOMP,
3892 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3893 },
3894 },
3895 {
3896 .aead = {
3897 .base = {
3898 .cra_name = "echainiv(authenc(hmac(sha224),"
3899 "cbc(des)))",
3900 .cra_driver_name = "echainiv-authenc-"
3901 "hmac-sha224-cbc-des-caam",
3902 .cra_blocksize = DES_BLOCK_SIZE,
3903 },
3904 .setkey = aead_setkey,
3905 .setauthsize = aead_setauthsize,
3906 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003907 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003908 .ivsize = DES_BLOCK_SIZE,
3909 .maxauthsize = SHA224_DIGEST_SIZE,
3910 },
3911 .caam = {
3912 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3913 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3914 OP_ALG_AAI_HMAC_PRECOMP,
3915 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3916 .geniv = true,
3917 },
3918 },
3919 {
3920 .aead = {
3921 .base = {
3922 .cra_name = "authenc(hmac(sha256),cbc(des))",
3923 .cra_driver_name = "authenc-hmac-sha256-"
3924 "cbc-des-caam",
3925 .cra_blocksize = DES_BLOCK_SIZE,
3926 },
3927 .setkey = aead_setkey,
3928 .setauthsize = aead_setauthsize,
3929 .encrypt = aead_encrypt,
3930 .decrypt = aead_decrypt,
3931 .ivsize = DES_BLOCK_SIZE,
3932 .maxauthsize = SHA256_DIGEST_SIZE,
3933 },
3934 .caam = {
3935 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3936 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3937 OP_ALG_AAI_HMAC_PRECOMP,
3938 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3939 },
3940 },
3941 {
3942 .aead = {
3943 .base = {
3944 .cra_name = "echainiv(authenc(hmac(sha256),"
3945 "cbc(des)))",
3946 .cra_driver_name = "echainiv-authenc-"
3947 "hmac-sha256-cbc-des-caam",
3948 .cra_blocksize = DES_BLOCK_SIZE,
3949 },
3950 .setkey = aead_setkey,
3951 .setauthsize = aead_setauthsize,
3952 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003953 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08003954 .ivsize = DES_BLOCK_SIZE,
3955 .maxauthsize = SHA256_DIGEST_SIZE,
3956 },
3957 .caam = {
3958 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3959 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3960 OP_ALG_AAI_HMAC_PRECOMP,
3961 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3962 .geniv = true,
3963 },
3964 },
3965 {
3966 .aead = {
3967 .base = {
3968 .cra_name = "authenc(hmac(sha384),cbc(des))",
3969 .cra_driver_name = "authenc-hmac-sha384-"
3970 "cbc-des-caam",
3971 .cra_blocksize = DES_BLOCK_SIZE,
3972 },
3973 .setkey = aead_setkey,
3974 .setauthsize = aead_setauthsize,
3975 .encrypt = aead_encrypt,
3976 .decrypt = aead_decrypt,
3977 .ivsize = DES_BLOCK_SIZE,
3978 .maxauthsize = SHA384_DIGEST_SIZE,
3979 },
3980 .caam = {
3981 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3982 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3983 OP_ALG_AAI_HMAC_PRECOMP,
3984 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3985 },
3986 },
3987 {
3988 .aead = {
3989 .base = {
3990 .cra_name = "echainiv(authenc(hmac(sha384),"
3991 "cbc(des)))",
3992 .cra_driver_name = "echainiv-authenc-"
3993 "hmac-sha384-cbc-des-caam",
3994 .cra_blocksize = DES_BLOCK_SIZE,
3995 },
3996 .setkey = aead_setkey,
3997 .setauthsize = aead_setauthsize,
3998 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03003999 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004000 .ivsize = DES_BLOCK_SIZE,
4001 .maxauthsize = SHA384_DIGEST_SIZE,
4002 },
4003 .caam = {
4004 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4005 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4006 OP_ALG_AAI_HMAC_PRECOMP,
4007 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4008 .geniv = true,
4009 },
4010 },
4011 {
4012 .aead = {
4013 .base = {
4014 .cra_name = "authenc(hmac(sha512),cbc(des))",
4015 .cra_driver_name = "authenc-hmac-sha512-"
4016 "cbc-des-caam",
4017 .cra_blocksize = DES_BLOCK_SIZE,
4018 },
4019 .setkey = aead_setkey,
4020 .setauthsize = aead_setauthsize,
4021 .encrypt = aead_encrypt,
4022 .decrypt = aead_decrypt,
4023 .ivsize = DES_BLOCK_SIZE,
4024 .maxauthsize = SHA512_DIGEST_SIZE,
4025 },
4026 .caam = {
4027 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4028 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4029 OP_ALG_AAI_HMAC_PRECOMP,
4030 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4031 },
4032 },
4033 {
4034 .aead = {
4035 .base = {
4036 .cra_name = "echainiv(authenc(hmac(sha512),"
4037 "cbc(des)))",
4038 .cra_driver_name = "echainiv-authenc-"
4039 "hmac-sha512-cbc-des-caam",
4040 .cra_blocksize = DES_BLOCK_SIZE,
4041 },
4042 .setkey = aead_setkey,
4043 .setauthsize = aead_setauthsize,
4044 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004045 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004046 .ivsize = DES_BLOCK_SIZE,
4047 .maxauthsize = SHA512_DIGEST_SIZE,
4048 },
4049 .caam = {
4050 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4051 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4052 OP_ALG_AAI_HMAC_PRECOMP,
4053 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4054 .geniv = true,
4055 },
4056 },
4057 {
4058 .aead = {
4059 .base = {
4060 .cra_name = "authenc(hmac(md5),"
4061 "rfc3686(ctr(aes)))",
4062 .cra_driver_name = "authenc-hmac-md5-"
4063 "rfc3686-ctr-aes-caam",
4064 .cra_blocksize = 1,
4065 },
4066 .setkey = aead_setkey,
4067 .setauthsize = aead_setauthsize,
4068 .encrypt = aead_encrypt,
4069 .decrypt = aead_decrypt,
4070 .ivsize = CTR_RFC3686_IV_SIZE,
4071 .maxauthsize = MD5_DIGEST_SIZE,
4072 },
4073 .caam = {
4074 .class1_alg_type = OP_ALG_ALGSEL_AES |
4075 OP_ALG_AAI_CTR_MOD128,
4076 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4077 OP_ALG_AAI_HMAC_PRECOMP,
4078 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4079 .rfc3686 = true,
4080 },
4081 },
4082 {
4083 .aead = {
4084 .base = {
4085 .cra_name = "seqiv(authenc("
4086 "hmac(md5),rfc3686(ctr(aes))))",
4087 .cra_driver_name = "seqiv-authenc-hmac-md5-"
4088 "rfc3686-ctr-aes-caam",
4089 .cra_blocksize = 1,
4090 },
4091 .setkey = aead_setkey,
4092 .setauthsize = aead_setauthsize,
4093 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004094 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004095 .ivsize = CTR_RFC3686_IV_SIZE,
4096 .maxauthsize = MD5_DIGEST_SIZE,
4097 },
4098 .caam = {
4099 .class1_alg_type = OP_ALG_ALGSEL_AES |
4100 OP_ALG_AAI_CTR_MOD128,
4101 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4102 OP_ALG_AAI_HMAC_PRECOMP,
4103 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4104 .rfc3686 = true,
4105 .geniv = true,
4106 },
4107 },
4108 {
4109 .aead = {
4110 .base = {
4111 .cra_name = "authenc(hmac(sha1),"
4112 "rfc3686(ctr(aes)))",
4113 .cra_driver_name = "authenc-hmac-sha1-"
4114 "rfc3686-ctr-aes-caam",
4115 .cra_blocksize = 1,
4116 },
4117 .setkey = aead_setkey,
4118 .setauthsize = aead_setauthsize,
4119 .encrypt = aead_encrypt,
4120 .decrypt = aead_decrypt,
4121 .ivsize = CTR_RFC3686_IV_SIZE,
4122 .maxauthsize = SHA1_DIGEST_SIZE,
4123 },
4124 .caam = {
4125 .class1_alg_type = OP_ALG_ALGSEL_AES |
4126 OP_ALG_AAI_CTR_MOD128,
4127 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4128 OP_ALG_AAI_HMAC_PRECOMP,
4129 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4130 .rfc3686 = true,
4131 },
4132 },
4133 {
4134 .aead = {
4135 .base = {
4136 .cra_name = "seqiv(authenc("
4137 "hmac(sha1),rfc3686(ctr(aes))))",
4138 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
4139 "rfc3686-ctr-aes-caam",
4140 .cra_blocksize = 1,
4141 },
4142 .setkey = aead_setkey,
4143 .setauthsize = aead_setauthsize,
4144 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004145 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004146 .ivsize = CTR_RFC3686_IV_SIZE,
4147 .maxauthsize = SHA1_DIGEST_SIZE,
4148 },
4149 .caam = {
4150 .class1_alg_type = OP_ALG_ALGSEL_AES |
4151 OP_ALG_AAI_CTR_MOD128,
4152 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4153 OP_ALG_AAI_HMAC_PRECOMP,
4154 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4155 .rfc3686 = true,
4156 .geniv = true,
4157 },
4158 },
4159 {
4160 .aead = {
4161 .base = {
4162 .cra_name = "authenc(hmac(sha224),"
4163 "rfc3686(ctr(aes)))",
4164 .cra_driver_name = "authenc-hmac-sha224-"
4165 "rfc3686-ctr-aes-caam",
4166 .cra_blocksize = 1,
4167 },
4168 .setkey = aead_setkey,
4169 .setauthsize = aead_setauthsize,
4170 .encrypt = aead_encrypt,
4171 .decrypt = aead_decrypt,
4172 .ivsize = CTR_RFC3686_IV_SIZE,
4173 .maxauthsize = SHA224_DIGEST_SIZE,
4174 },
4175 .caam = {
4176 .class1_alg_type = OP_ALG_ALGSEL_AES |
4177 OP_ALG_AAI_CTR_MOD128,
4178 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4179 OP_ALG_AAI_HMAC_PRECOMP,
4180 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4181 .rfc3686 = true,
4182 },
4183 },
4184 {
4185 .aead = {
4186 .base = {
4187 .cra_name = "seqiv(authenc("
4188 "hmac(sha224),rfc3686(ctr(aes))))",
4189 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
4190 "rfc3686-ctr-aes-caam",
4191 .cra_blocksize = 1,
4192 },
4193 .setkey = aead_setkey,
4194 .setauthsize = aead_setauthsize,
4195 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004196 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004197 .ivsize = CTR_RFC3686_IV_SIZE,
4198 .maxauthsize = SHA224_DIGEST_SIZE,
4199 },
4200 .caam = {
4201 .class1_alg_type = OP_ALG_ALGSEL_AES |
4202 OP_ALG_AAI_CTR_MOD128,
4203 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4204 OP_ALG_AAI_HMAC_PRECOMP,
4205 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4206 .rfc3686 = true,
4207 .geniv = true,
4208 },
4209 },
4210 {
4211 .aead = {
4212 .base = {
4213 .cra_name = "authenc(hmac(sha256),"
4214 "rfc3686(ctr(aes)))",
4215 .cra_driver_name = "authenc-hmac-sha256-"
4216 "rfc3686-ctr-aes-caam",
4217 .cra_blocksize = 1,
4218 },
4219 .setkey = aead_setkey,
4220 .setauthsize = aead_setauthsize,
4221 .encrypt = aead_encrypt,
4222 .decrypt = aead_decrypt,
4223 .ivsize = CTR_RFC3686_IV_SIZE,
4224 .maxauthsize = SHA256_DIGEST_SIZE,
4225 },
4226 .caam = {
4227 .class1_alg_type = OP_ALG_ALGSEL_AES |
4228 OP_ALG_AAI_CTR_MOD128,
4229 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4230 OP_ALG_AAI_HMAC_PRECOMP,
4231 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4232 .rfc3686 = true,
4233 },
4234 },
4235 {
4236 .aead = {
4237 .base = {
4238 .cra_name = "seqiv(authenc(hmac(sha256),"
4239 "rfc3686(ctr(aes))))",
4240 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
4241 "rfc3686-ctr-aes-caam",
4242 .cra_blocksize = 1,
4243 },
4244 .setkey = aead_setkey,
4245 .setauthsize = aead_setauthsize,
4246 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004247 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004248 .ivsize = CTR_RFC3686_IV_SIZE,
4249 .maxauthsize = SHA256_DIGEST_SIZE,
4250 },
4251 .caam = {
4252 .class1_alg_type = OP_ALG_ALGSEL_AES |
4253 OP_ALG_AAI_CTR_MOD128,
4254 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4255 OP_ALG_AAI_HMAC_PRECOMP,
4256 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4257 .rfc3686 = true,
4258 .geniv = true,
4259 },
4260 },
4261 {
4262 .aead = {
4263 .base = {
4264 .cra_name = "authenc(hmac(sha384),"
4265 "rfc3686(ctr(aes)))",
4266 .cra_driver_name = "authenc-hmac-sha384-"
4267 "rfc3686-ctr-aes-caam",
4268 .cra_blocksize = 1,
4269 },
4270 .setkey = aead_setkey,
4271 .setauthsize = aead_setauthsize,
4272 .encrypt = aead_encrypt,
4273 .decrypt = aead_decrypt,
4274 .ivsize = CTR_RFC3686_IV_SIZE,
4275 .maxauthsize = SHA384_DIGEST_SIZE,
4276 },
4277 .caam = {
4278 .class1_alg_type = OP_ALG_ALGSEL_AES |
4279 OP_ALG_AAI_CTR_MOD128,
4280 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4281 OP_ALG_AAI_HMAC_PRECOMP,
4282 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4283 .rfc3686 = true,
4284 },
4285 },
4286 {
4287 .aead = {
4288 .base = {
4289 .cra_name = "seqiv(authenc(hmac(sha384),"
4290 "rfc3686(ctr(aes))))",
4291 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
4292 "rfc3686-ctr-aes-caam",
4293 .cra_blocksize = 1,
4294 },
4295 .setkey = aead_setkey,
4296 .setauthsize = aead_setauthsize,
4297 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004298 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004299 .ivsize = CTR_RFC3686_IV_SIZE,
4300 .maxauthsize = SHA384_DIGEST_SIZE,
4301 },
4302 .caam = {
4303 .class1_alg_type = OP_ALG_ALGSEL_AES |
4304 OP_ALG_AAI_CTR_MOD128,
4305 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4306 OP_ALG_AAI_HMAC_PRECOMP,
4307 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4308 .rfc3686 = true,
4309 .geniv = true,
4310 },
4311 },
4312 {
4313 .aead = {
4314 .base = {
4315 .cra_name = "authenc(hmac(sha512),"
4316 "rfc3686(ctr(aes)))",
4317 .cra_driver_name = "authenc-hmac-sha512-"
4318 "rfc3686-ctr-aes-caam",
4319 .cra_blocksize = 1,
4320 },
4321 .setkey = aead_setkey,
4322 .setauthsize = aead_setauthsize,
4323 .encrypt = aead_encrypt,
4324 .decrypt = aead_decrypt,
4325 .ivsize = CTR_RFC3686_IV_SIZE,
4326 .maxauthsize = SHA512_DIGEST_SIZE,
4327 },
4328 .caam = {
4329 .class1_alg_type = OP_ALG_ALGSEL_AES |
4330 OP_ALG_AAI_CTR_MOD128,
4331 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4332 OP_ALG_AAI_HMAC_PRECOMP,
4333 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4334 .rfc3686 = true,
4335 },
4336 },
4337 {
4338 .aead = {
4339 .base = {
4340 .cra_name = "seqiv(authenc(hmac(sha512),"
4341 "rfc3686(ctr(aes))))",
4342 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
4343 "rfc3686-ctr-aes-caam",
4344 .cra_blocksize = 1,
4345 },
4346 .setkey = aead_setkey,
4347 .setauthsize = aead_setauthsize,
4348 .encrypt = aead_encrypt,
Horia Geantă8b18e232016-08-29 14:52:14 +03004349 .decrypt = aead_decrypt,
Herbert Xu479bcc72015-07-30 17:53:17 +08004350 .ivsize = CTR_RFC3686_IV_SIZE,
4351 .maxauthsize = SHA512_DIGEST_SIZE,
4352 },
4353 .caam = {
4354 .class1_alg_type = OP_ALG_ALGSEL_AES |
4355 OP_ALG_AAI_CTR_MOD128,
4356 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4357 OP_ALG_AAI_HMAC_PRECOMP,
4358 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4359 .rfc3686 = true,
4360 .geniv = true,
4361 },
4362 },
Herbert Xuf2147b82015-06-16 13:54:23 +08004363};
4364
4365struct caam_crypto_alg {
4366 struct crypto_alg crypto_alg;
4367 struct list_head entry;
4368 struct caam_alg_entry caam;
4369};
4370
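/*
 * Bind the transform to a job ring and cache the operation header
 * templates used later when building shared descriptors.
 */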
4371static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4372{
4373 ctx->jrdev = caam_jr_alloc();
4374 if (IS_ERR(ctx->jrdev)) {
4375 pr_err("Job Ring Device allocation for transform failed\n");
4376 return PTR_ERR(ctx->jrdev);
4377 }
4378
4379 /* copy descriptor header template value */
4380 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4381 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4382 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4383
4384 return 0;
4385}
4386
Kim Phillips8e8ec592011-03-13 16:54:26 +08004387static int caam_cra_init(struct crypto_tfm *tfm)
4388{
4389 struct crypto_alg *alg = tfm->__crt_alg;
4390 struct caam_crypto_alg *caam_alg =
4391 container_of(alg, struct caam_crypto_alg, crypto_alg);
4392 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004393
Herbert Xuf2147b82015-06-16 13:54:23 +08004394 return caam_init_common(ctx, &caam_alg->caam);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004395}
4396
Herbert Xuf2147b82015-06-16 13:54:23 +08004397static int caam_aead_init(struct crypto_aead *tfm)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004398{
Herbert Xuf2147b82015-06-16 13:54:23 +08004399 struct aead_alg *alg = crypto_aead_alg(tfm);
4400 struct caam_aead_alg *caam_alg =
4401 container_of(alg, struct caam_aead_alg, aead);
4402 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004403
Herbert Xuf2147b82015-06-16 13:54:23 +08004404 return caam_init_common(ctx, &caam_alg->caam);
4405}
4406
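/*
 * Unmap the shared descriptors and any key material that were
 * DMA-mapped for this context, then release the job ring.
 */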
4407static void caam_exit_common(struct caam_ctx *ctx)
4408{
Yuan Kang1acebad2011-07-15 11:21:42 +08004409 if (ctx->sh_desc_enc_dma &&
4410 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4411 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4412 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4413 if (ctx->sh_desc_dec_dma &&
4414 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4415 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4416 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4417 if (ctx->sh_desc_givenc_dma &&
4418 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4419 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4420 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05004421 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02004422 if (ctx->key_dma &&
4423 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4424 dma_unmap_single(ctx->jrdev, ctx->key_dma,
4425 ctx->enckeylen + ctx->split_key_pad_len,
4426 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304427
4428 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004429}
4430
Herbert Xuf2147b82015-06-16 13:54:23 +08004431static void caam_cra_exit(struct crypto_tfm *tfm)
4432{
4433 caam_exit_common(crypto_tfm_ctx(tfm));
4434}
4435
4436static void caam_aead_exit(struct crypto_aead *tfm)
4437{
4438 caam_exit_common(crypto_aead_ctx(tfm));
4439}
4440
Kim Phillips8e8ec592011-03-13 16:54:26 +08004441static void __exit caam_algapi_exit(void)
4442{
Kim Phillips8e8ec592011-03-13 16:54:26 +08004444 struct caam_crypto_alg *t_alg, *n;
Herbert Xuf2147b82015-06-16 13:54:23 +08004445 int i;
4446
4447 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4448 struct caam_aead_alg *t_alg = driver_aeads + i;
4449
4450 if (t_alg->registered)
4451 crypto_unregister_aead(&t_alg->aead);
4452 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004453
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304454 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08004455 return;
4456
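	/*
	 * Non-AEAD algorithms were allocated dynamically in
	 * caam_alg_alloc() and live on alg_list; unregister and free
	 * each in turn.
	 */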
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304457 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08004458 crypto_unregister_alg(&t_alg->crypto_alg);
4459 list_del(&t_alg->entry);
4460 kfree(t_alg);
4461 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004462}
4463
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304464static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08004465 *template)
4466{
4467 struct caam_crypto_alg *t_alg;
4468 struct crypto_alg *alg;
4469
Fabio Estevam9c4f9732015-08-21 13:52:00 -03004470 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004471 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304472 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08004473 return ERR_PTR(-ENOMEM);
4474 }
4475
4476 alg = &t_alg->crypto_alg;
4477
4478 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4479 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4480 template->driver_name);
4481 alg->cra_module = THIS_MODULE;
4482 alg->cra_init = caam_cra_init;
4483 alg->cra_exit = caam_cra_exit;
4484 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004485 alg->cra_blocksize = template->blocksize;
4486 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004487 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01004488 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4489 template->type;
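	/*
	 * Givcipher templates supply their own IV generation via
	 * ->givencrypt; plain ablkcipher templates fall back to the
	 * geniv string in the template.
	 */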
Yuan Kang885e9e22011-07-15 11:21:41 +08004490 switch (template->type) {
Catalin Vasile7222d1a2014-10-31 12:45:38 +02004491 case CRYPTO_ALG_TYPE_GIVCIPHER:
4492 alg->cra_type = &crypto_givcipher_type;
4493 alg->cra_ablkcipher = template->template_ablkcipher;
4494 break;
Yuan Kangacdca312011-07-15 11:21:42 +08004495 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4496 alg->cra_type = &crypto_ablkcipher_type;
4497 alg->cra_ablkcipher = template->template_ablkcipher;
4498 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08004499 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08004500
Herbert Xuf2147b82015-06-16 13:54:23 +08004501 t_alg->caam.class1_alg_type = template->class1_alg_type;
4502 t_alg->caam.class2_alg_type = template->class2_alg_type;
4503 t_alg->caam.alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08004504
4505 return t_alg;
4506}
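
/*
 * Illustrative only: a minimal sketch of the kind of driver_algs entry
 * caam_alg_alloc() consumes, assuming helper names in the style of the
 * other ablkcipher templates in this file. The names and values below
 * are hypothetical, not a verbatim excerpt from the table.
 *
 *	{
 *		.name = "cbc(aes)",
 *		.driver_name = "cbc-aes-caam",
 *		.blocksize = AES_BLOCK_SIZE,
 *		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 *		.template_ablkcipher = {
 *			.setkey = ablkcipher_setkey,
 *			.encrypt = ablkcipher_encrypt,
 *			.decrypt = ablkcipher_decrypt,
 *			.min_keysize = AES_MIN_KEY_SIZE,
 *			.max_keysize = AES_MAX_KEY_SIZE,
 *			.ivsize = AES_BLOCK_SIZE,
 *		},
 *		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 *	},
 */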

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
        struct aead_alg *alg = &t_alg->aead;

        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

        alg->init = caam_aead_init;
        alg->exit = caam_aead_exit;
}
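
/*
 * Illustrative only: a hypothetical driver_aeads entry, shown to make
 * clear what caam_aead_alg_init() fills in at registration time
 * (module, priority, ctxsize, flags, init/exit) versus what each table
 * entry must provide. Callback and constant names follow this file's
 * conventions but are assumptions, not a verbatim excerpt.
 *
 *	{
 *		.aead = {
 *			.base = {
 *				.cra_name = "gcm(aes)",
 *				.cra_driver_name = "gcm-aes-caam",
 *				.cra_blocksize = 1,
 *			},
 *			.setkey = gcm_setkey,
 *			.setauthsize = gcm_setauthsize,
 *			.encrypt = gcm_encrypt,
 *			.decrypt = gcm_decrypt,
 *			.ivsize = 12,
 *			.maxauthsize = AES_BLOCK_SIZE,
 *		},
 *		.caam = {
 *			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
 *		},
 *	},
 */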

static int __init caam_algapi_init(void)
{
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
        struct caam_drv_private *priv;
        int i = 0, err = 0;
        u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
        unsigned int md_limit = SHA512_DIGEST_SIZE;
        bool registered = false;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        ctrldev = &pdev->dev;
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);
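
        /*
         * A minimal sketch of the device-tree node matched above
         * (illustrative: the unit address and node name are
         * hypothetical; only the compatible string comes from the
         * lookup above):
         *
         *	crypto@300000 {
         *		compatible = "fsl,sec-v4.0";
         *	};
         */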

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv)
                return -ENODEV;

        INIT_LIST_HEAD(&alg_list);

        /*
         * Register crypto algorithms the device supports.
         * First, detect presence and attributes of DES, AES, and MD blocks.
         */
        cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
        cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
        des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
        aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
        md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

        /* If MD is present, limit digest size based on LP256 */
        if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
                md_limit = SHA256_DIGEST_SIZE;
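
        /*
         * Sketch of the decode above (descriptive, not normative): each
         * CHA_ID_LS_* field of cha_num_ls holds an instantiation count,
         * so a zero des_inst/aes_inst/md_inst means the corresponding
         * accelerator block is absent and its algorithms are skipped in
         * the loops below. On low-power parts whose MD block reports
         * LP256, md_limit drops from SHA512_DIGEST_SIZE to
         * SHA256_DIGEST_SIZE and the larger digests are filtered out.
         */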

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_crypto_alg *t_alg;
                struct caam_alg_template *alg = driver_algs + i;
                u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!des_inst &&
                    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
                     (alg_sel == OP_ALG_ALGSEL_DES)))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
                        continue;

                t_alg = caam_alg_alloc(alg);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        pr_warn("%s alg allocation failed\n",
                                alg->driver_name);
                        continue;
                }

                err = crypto_register_alg(&t_alg->crypto_alg);
                if (err) {
                        pr_warn("%s alg registration failed\n",
                                t_alg->crypto_alg.cra_driver_name);
                        kfree(t_alg);
                        continue;
                }

                list_add_tail(&t_alg->entry, &alg_list);
                registered = true;
        }

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;
                u32 c1_alg_sel = t_alg->caam.class1_alg_type &
                                 OP_ALG_ALGSEL_MASK;
                u32 c2_alg_sel = t_alg->caam.class2_alg_type &
                                 OP_ALG_ALGSEL_MASK;
                u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!des_inst &&
                    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
                     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
                        continue;

                /*
                 * Check support for AES algorithms not available
                 * on LP devices.
                 */
                if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
                        if (alg_aai == OP_ALG_AAI_GCM)
                                continue;

                /*
                 * Skip algorithms requiring message digests
                 * if MD or MD size is not supported by device.
                 */
                if (c2_alg_sel &&
                    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
                        continue;

                caam_aead_alg_init(t_alg);

                err = crypto_register_aead(&t_alg->aead);
                if (err) {
                        pr_warn("%s alg registration failed\n",
                                t_alg->aead.base.cra_driver_name);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }

        if (registered)
                pr_info("caam algorithms registered in /proc/crypto\n");

        return err;
}
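
/*
 * A minimal sketch of how a successfully registered algorithm might
 * appear in /proc/crypto (illustrative output only; the algorithm
 * shown and the module name are assumptions, the priority follows
 * CAAM_CRA_PRIORITY):
 *
 *	name         : cbc(aes)
 *	driver       : cbc-aes-caam
 *	module       : caamalg
 *	priority     : 3000
 *	type         : ablkcipher
 */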

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");